diff --git a/akka-docs/rst/experimental/index.rst b/akka-docs/rst/experimental/index.rst index 4d43da5b09..aacda42122 100644 --- a/akka-docs/rst/experimental/index.rst +++ b/akka-docs/rst/experimental/index.rst @@ -19,6 +19,7 @@ prior deprecation. .. toctree:: :maxdepth: 1 + ../scala/persistence ../dev/multi-node-testing Another reason for marking a module as experimental is that it's too early diff --git a/akka-docs/rst/java/code/docs/persistence/PersistenceDocTest.java b/akka-docs/rst/java/code/docs/persistence/PersistenceDocTest.java index 079ff4378a..7542a86351 100644 --- a/akka-docs/rst/java/code/docs/persistence/PersistenceDocTest.java +++ b/akka-docs/rst/java/code/docs/persistence/PersistenceDocTest.java @@ -139,7 +139,7 @@ public class PersistenceDocTest { if (message instanceof Persistent) { Persistent p = (Persistent)message; Persistent out = p.withPayload("done " + p.payload()); - channel.tell(Deliver.create(out, destination), getSelf()); + channel.tell(Deliver.create(out, destination.path()), getSelf()); } } } @@ -174,24 +174,35 @@ public class PersistenceDocTest { .withRedeliverInterval(Duration.create(30, TimeUnit.SECONDS)) .withRedeliverMax(15))); //#channel-custom-settings + + //#channel-custom-listener + class MyListener extends UntypedActor { + @Override + public void onReceive(Object message) throws Exception { + if (message instanceof RedeliverFailure) { + Iterable<ConfirmablePersistent> messages = + ((RedeliverFailure)message).getMessages(); + // ... + } + } + } + + final ActorRef myListener = getContext().actorOf(Props.create(MyListener.class)); + getContext().actorOf(Channel.props( + ChannelSettings.create().withRedeliverFailureListener(myListener))); + //#channel-custom-listener + } public void onReceive(Object message) throws Exception { if (message instanceof Persistent) { Persistent p = (Persistent)message; Persistent out = p.withPayload("done " + p.payload()); - channel.tell(Deliver.create(out, destination), getSelf()); + channel.tell(Deliver.create(out, destination.path()), getSelf()); //#channel-example-reply - channel.tell(Deliver.create(out, getSender()), getSelf()); + channel.tell(Deliver.create(out, getSender().path()), getSelf()); //#channel-example-reply - //#resolve-destination - channel.tell(Deliver.create(out, getSender(), Resolve.destination()), getSelf()); - //#resolve-destination - //#resolve-sender - channel.tell(Deliver.create(out, destination, Resolve.sender()), getSender()); - //#resolve-sender - } } } @@ -292,9 +303,13 @@ public class PersistenceDocTest { .withRedeliverInterval(Duration.create(30, TimeUnit.SECONDS)) .withRedeliverMax(15)), "myPersistentChannel"); - channel.tell(Deliver.create(Persistent.create("example"), destination), getSelf()); + channel.tell(Deliver.create(Persistent.create("example"), destination.path()), getSelf()); //#persistent-channel-example - + //#persistent-channel-watermarks + PersistentChannelSettings.create() + .withPendingConfirmationsMax(10000) + .withPendingConfirmationsMin(2000); + //#persistent-channel-watermarks //#persistent-channel-reply PersistentChannelSettings.create().withReplyPersistent(true); //#persistent-channel-reply @@ -318,7 +333,7 @@ public class PersistenceDocTest { // ...
// reliably deliver events channel.tell(Deliver.create( - event, getCurrentPersistentMessage()), destination), getSelf()); + event, getCurrentPersistentMessage()), destination.path()), getSelf()); } public void onReceiveReplay(Object msg) { @@ -339,4 +354,30 @@ public class PersistenceDocTest { } //#reliable-event-delivery }; + + static Object o9 = new Object() { + //#view + class MyView extends UntypedView { + @Override + public String processorId() { + return "some-processor-id"; + } + + @Override + public void onReceive(Object message) throws Exception { + if (message instanceof Persistent) { + // ... + } + } + } + //#view + + public void usage() { + final ActorSystem system = ActorSystem.create("example"); + //#view-update + final ActorRef view = system.actorOf(Props.create(MyView.class)); + view.tell(Update.create(true), null); + //#view-update + } + }; } diff --git a/akka-docs/rst/java/code/docs/persistence/PersistencePluginDocTest.java b/akka-docs/rst/java/code/docs/persistence/PersistencePluginDocTest.java index 962e40669b..821696e79a 100644 --- a/akka-docs/rst/java/code/docs/persistence/PersistencePluginDocTest.java +++ b/akka-docs/rst/java/code/docs/persistence/PersistencePluginDocTest.java @@ -77,22 +77,32 @@ public class PersistencePluginDocTest { class MyAsyncJournal extends AsyncWriteJournal { @Override - public Future<Long> doReplayAsync(String processorId, long fromSequenceNr, long toSequenceNr, Procedure<PersistentRepr> replayCallback) { + public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> messages) { return null; } @Override - public Future<Void> doWriteAsync(Iterable<PersistentRepr> persistentBatch) { + public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations) { return null; } @Override - public Future<Void> doDeleteAsync(String processorId, long fromSequenceNr, long toSequenceNr, boolean permanent) { + public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent) { return null; } @Override - public Future<Void> doConfirmAsync(String processorId, long sequenceNr, String channelId) { + public Future<Void> doAsyncDeleteMessagesTo(String processorId, long toSequenceNr, boolean permanent) { + return null; + } + + @Override + public Future<Void> doAsyncReplayMessages(String processorId, long fromSequenceNr, long toSequenceNr, long max, Procedure<PersistentRepr> replayCallback) { + return null; + } + + @Override + public Future<Long> doAsyncReadHighestSequenceNr(String processorId, long fromSequenceNr) { return null; } } diff --git a/akka-docs/rst/java/persistence.rst b/akka-docs/rst/java/persistence.rst index f03032eddf..900d743362 100644 --- a/akka-docs/rst/java/persistence.rst +++ b/akka-docs/rst/java/persistence.rst @@ -5,21 +5,13 @@ Persistence ########### Akka persistence enables stateful actors to persist their internal state so that it can be recovered when an actor -is started, restarted by a supervisor or migrated in a cluster. It also allows stateful actors to recover from JVM -crashes, for example. The key concept behind Akka persistence is that only changes to an actor's internal state are -persisted but never its current state directly (except for optional snapshots). These changes are only ever appended -to storage, nothing is ever mutated, which allows for very high transaction rates and efficient replication. Stateful -actors are recovered by replaying stored changes to these actors from which they can rebuild internal state. This can -be either the full history of changes or starting from a snapshot of internal actor state which can dramatically -reduce recovery times.
Akka persistence also provides point-to-point communication channels with at-least-once -message delivery guarantees. - -Storage backends for state changes and snapshots are pluggable in Akka persistence. Currently, these are written to -the local filesystem. Distributed and replicated storage, with the possibility of scaling writes, will be available -soon. - -Akka persistence is inspired by the `eventsourced`_ library. It follows the same concepts and architecture of -`eventsourced`_ but significantly differs on API and implementation level. +is started, restarted after a JVM crash or by a supervisor, or migrated in a cluster. The key concept behind Akka +persistence is that only changes to an actor's internal state are persisted but never its current state directly +(except for optional snapshots). These changes are only ever appended to storage, nothing is ever mutated, which +allows for very high transaction rates and efficient replication. Stateful actors are recovered by replaying stored +changes to these actors from which they can rebuild internal state. This can be either the full history of changes +or starting from a snapshot which can dramatically reduce recovery times. Akka persistence also provides point-to-point +communication channels with at-least-once message delivery semantics. .. warning:: @@ -28,6 +20,9 @@ Akka persistence is inspired by the `eventsourced`_ library. It follows the same changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the contents of the ``akka.persistence`` package. +Akka persistence is inspired by the `eventsourced`_ library. It follows the same concepts and architecture of +`eventsourced`_ but significantly differs on API and implementation level. + .. _eventsourced: https://github.com/eligosource/eventsourced Dependencies @@ -48,16 +43,22 @@ Architecture before its ``onReceive`` method is called. When a processor is started or restarted, journaled messages are replayed to that processor, so that it can recover internal state from these messages. -* *Channel*: Channels are used by processors to communicate with other actors. They prevent that replayed messages - are redundantly delivered to these actors and provide at-least-once message delivery guarantees, also in case of - sender and receiver JVM crashes. +* *View*: A view is a persistent, stateful actor that receives journaled messages that have been written by another + processor. A view itself does not journal new messages; instead, it updates internal state only from a processor's + replicated message stream. + +* *Channel*: Channels are used by processors and views to communicate with other actors. They prevent replayed + messages from being redundantly delivered to these actors and provide at-least-once message delivery semantics, also in + case of sender and receiver JVM crashes. * *Journal*: A journal stores the sequence of messages sent to a processor. An application can control which messages - are stored and which are received by the processor without being journaled. The storage backend of a journal is - pluggable. + are journaled and which are received by the processor without being journaled. The storage backend of a journal is + pluggable. The default journal storage plugin writes to the local filesystem; replicated journals are available as + :ref:`community-projects-java`. -* *Snapshot store*: A snapshot store persists snapshots of a processor's internal state. Snapshots are used for - optimizing recovery times.
The storage backend of a snapshot store is pluggable. +* *Snapshot store*: A snapshot store persists snapshots of a processor's or a view's internal state. Snapshots are + used for optimizing recovery times. The storage backend of a snapshot store is pluggable. The default snapshot + storage plugin writes to the local filesystem. * *Event sourcing*. Based on the building blocks described above, Akka persistence provides abstractions for the development of event sourced applications (see section :ref:`event-sourcing-java`) @@ -75,10 +76,9 @@ A processor can be implemented by extending the abstract ``UntypedProcessor`` cl Processors only write messages of type ``Persistent`` to the journal, others are received without being persisted. When a processor's ``onReceive`` method is called with a ``Persistent`` message it can safely assume that this message has been successfully written to the journal. If a journal fails to write a ``Persistent`` message then the processor -is stopped, by default. If an application wants that a processors continues to run on persistence failures it must -handle ``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure, -so that the sender can re-send the message, if needed, under the assumption that the journal recovered from a -temporary failure. +is stopped, by default. If a processor should continue running on persistence failures it must handle +``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure, +so that the sender can re-send the message, if needed. An ``UntypedProcessor`` itself is an ``Actor`` and can therefore be instantiated with ``actorOf``. @@ -87,9 +87,9 @@ An ``UntypedProcessor`` itself is an ``Actor`` and can therefore be instantiated Recovery -------- -By default, a processor is automatically recovered on start and on restart by replaying persistent messages. +By default, a processor is automatically recovered on start and on restart by replaying journaled messages. New messages sent to a processor during recovery do not interfere with replayed messages. New messages will -only be received by that processor after recovery completes. +only be received by a processor after recovery completes. Recovery customization ^^^^^^^^^^^^^^^^^^^^^^ @@ -137,7 +137,7 @@ that message as argument. An optional ``permanent`` parameter specifies whether deleted from the journal or only marked as deleted. In both cases, the message won't be replayed. Later extensions to Akka persistence will allow to replay messages that have been marked as deleted which can be useful for debugging purposes, for example. To delete all messages (journaled by a single processor) up to a specified sequence number, -processors can call the ``deleteMessages`` method. +processors should call the ``deleteMessages`` method. Identifiers ----------- @@ -150,41 +150,103 @@ method. Applications can customize a processor's id by specifying an actor name during processor creation as shown in section :ref:`processors-java`. This changes that processor's name in its actor hierarchy and hence influences only -part of the processor id. To fully customize a processor's id, the ``processorId`` method should be overridden. +part of the processor id. To fully customize a processor's id, the ``processorId`` method must be overridden. .. includecode:: code/docs/persistence/PersistenceDocTest.java#processor-id-override +Overriding ``processorId`` is the recommended way to generate stable identifiers. + +.. 
_views-java: + +Views +===== + +Views can be implemented by extending the ``UntypedView`` class and implementing the ``onReceive`` and the ``processorId`` +methods. + +.. includecode:: code/docs/persistence/PersistenceDocTest.java#view + +The ``processorId`` identifies the processor from which the view receives journaled messages. It is not necessary +that the referenced processor is actually running. Views read messages from a processor's journal directly. When a +processor is started later and begins to write new messages, the corresponding view is updated automatically, by +default. + +Updates +------- + +The default update interval of all views of an actor system is configurable: + +.. includecode:: ../scala/code/docs/persistence/PersistenceDocSpec.scala#auto-update-interval + +``UntypedView`` implementation classes may also override the ``autoUpdateInterval`` method to return a custom update +interval for a specific view class or view instance. Applications may also trigger additional updates at +any time by sending a view an ``Update`` message. + +.. includecode:: code/docs/persistence/PersistenceDocTest.java#view-update + +If the ``await`` parameter is set to ``true``, messages that follow the ``Update`` request are processed when the +incremental message replay, triggered by that update request, has completed. If set to ``false`` (default), messages +following the update request may interleave with the replayed message stream. Automated updates always run with +``await = false``. + +Automated updates of all views of an actor system can be turned off by configuration: + +.. includecode:: ../scala/code/docs/persistence/PersistenceDocSpec.scala#auto-update + +Implementation classes may override the configured default value by overriding the ``autoUpdate`` method. To +limit the number of replayed messages per update request, applications can configure a custom +``akka.persistence.view.auto-update-replay-max`` value or override the ``autoUpdateReplayMax`` method. The number +of replayed messages for manual updates can be limited with the ``replayMax`` parameter of the ``Update`` message. + +Recovery +-------- + +Initial recovery of views works in the very same way as for :ref:`processors-java` (i.e. by sending a ``Recover`` message +to self). The maximum number of replayed messages during initial recovery is determined by ``autoUpdateReplayMax``. +Further possibilities to customize initial recovery are explained in section :ref:`processors-java`. + +Identifiers +----------- + +A view must have an identifier that doesn't change across different actor incarnations. It defaults to the +``String`` representation of the actor path without the address part and can be obtained via the ``viewId`` +method. + +Applications can customize a view's id by specifying an actor name during view creation. This changes that view's +name in its actor hierarchy and hence influences only part of the view id. To fully customize a view's id, the +``viewId`` method must be overridden. Overriding ``viewId`` is the recommended way to generate stable identifiers. + +The ``viewId`` must differ from the referenced ``processorId``, unless :ref:`snapshots-java` of a view and its +processor shall be shared (which applications usually do not want). + .. _channels-java: Channels ======== -.. warning:: +Channels are special actors that are used by processors or views to communicate with other actors (channel +destinations). The following discusses channels in the context of processors, but this is also applicable to views.
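As an illustration of how views and channels combine, here is a minimal sketch; it is not part of the original samples, ``ReportDestination`` and the channel name are hypothetical, and only APIs introduced in this patch (``View``, ``Channel``, ``Deliver``, ``ConfirmablePersistent``) are assumed::

    import akka.actor._
    import akka.persistence._

    class ReportDestination extends Actor {
      def receive = {
        // acknowledge delivery, otherwise the channel re-delivers the message
        case p @ ConfirmablePersistent(payload, _, _) => p.confirm()
      }
    }

    class ReportView extends View {
      def processorId: String = "some-processor-id"

      val destination = context.actorOf(Props[ReportDestination])
      val channel = context.actorOf(Channel.props("report-channel"))

      def receive = {
        case p @ Persistent(payload, _) =>
          // forward journaled messages with at-least-once delivery semantics
          channel ! Deliver(p.withPayload(s"report ${payload}"), destination.path)
      }
    }

Because the view never journals messages itself, the channel is what prevents replayed messages from being redundantly delivered to the destination.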
- There are further changes planned to the channel API that couldn't make it into the current milestone. - One example is to have only a single destination per channel to allow gap detection and more advanced - flow control. - -Channels are special actors that are used by processors to communicate with other actors (channel destinations). Channels prevent redundant delivery of replayed messages to destinations during processor recovery. A replayed -message is retained by a channel if its previous delivery has been confirmed by a destination. +message is retained by a channel if its delivery has been confirmed by a destination. .. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-example A channel is ready to use once it has been created, no recovery or further activation is needed. A ``Deliver`` -request instructs a channel to send a ``Persistent`` message to a destination. Sender references are preserved -by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request. +request instructs a channel to send a ``Persistent`` message to a destination. A destination is provided as +``ActorPath`` and messages are sent by the channel via that path's ``ActorSelection``. Sender references are +preserved by a channel; therefore, a destination can reply to the sender of a ``Deliver`` request. -If a processor wants to reply to a ``Persistent`` message sender it should use the ``getSender()`` reference as +If a processor wants to reply to a ``Persistent`` message sender it should use the ``getSender()`` path as channel destination. .. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-example-reply Persistent messages delivered by a channel are of type ``ConfirmablePersistent``. ``ConfirmablePersistent`` extends -``Persistent`` by adding the methods ``confirm`` method and ``redeliveries`` (see also :ref:`redelivery-java`). Channel -destinations confirm the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` an that message. -This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain these confirmation -entries which allows a channel to decide if a message should be retained or not. +``Persistent`` by adding the methods ``confirm`` and ``redeliveries`` (see also :ref:`redelivery-java`). A channel +destination confirms the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` on that message. +This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain confirmation +entries, which allows a channel to decide if it should retain these messages or not. A ``Processor`` can also be used as channel destination i.e. it can persist ``ConfirmablePersistent`` messages too. @@ -193,25 +255,23 @@ A ``Processor`` can also be used as channel destination i.e. it can persist ``Co Message re-delivery ------------------- -Channels re-deliver messages to destinations if they do not confirm their receipt within a configurable timeout. +Channels re-deliver messages to destinations if they do not confirm delivery within a configurable timeout. This timeout can be specified as ``redeliverInterval`` when creating a channel, optionally together with the -maximum number of re-deliveries a channel should attempt for each unconfirmed message. +maximum number of re-deliveries a channel should attempt for each unconfirmed message. The number of re-delivery +attempts can be obtained via the ``redeliveries`` method on ``ConfirmablePersistent``. ..
includecode:: code/docs/persistence/PersistenceDocTest.java#channel-custom-settings -Message re-delivery is done out of order with regards to normal delivery i.e. redelivered messages may arrive -later than newer normally delivered messages. The number of re-delivery attempts can be obtained via the -``redeliveries`` method on ``ConfirmablePersistent``. +A channel keeps messages in memory until their successful delivery has been confirmed or the maximum number of +re-deliveries is reached. To be notified about messages that have reached the maximum number of re-deliveries, +applications can register a listener at channel creation. -A channel keeps messages in memory until their successful delivery has been confirmed by their destination(s) -or their maximum number of re-deliveries is reached. In the latter case, the application has to re-send the -correspnding ``Deliver`` request to the channel so that the channel can start a new series of delivery attempts -(starting again with a ``redeliveries`` count of ``0``). +.. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-custom-listener -Re-sending ``Deliver`` requests is done automatically if the sending processor replays messages: only ``Deliver`` -requests of unconfirmed messages will be served again by the channel. A message replay can be enforced by an -application by restarting the sending processor, for example. A replay will also take place if the whole -application is restarted, either after normal termination or after a crash. +A listener receives ``RedeliverFailure`` notifications containing all messages that could not be delivered. On +receiving a ``RedeliverFailure`` message, an application may decide to restart the sending processor to enforce +a re-send of these messages to the channel or confirm these messages to prevent further re-sends. The sending +processor can also be restarted any time later to re-send unconfirmed messages. This combination of @@ -220,7 +280,7 @@ This combination of * message re-deliveries by channels and * application-level confirmations (acknowledgements) by destinations -enables channels to provide at-least-once message delivery guarantees. Possible duplicates can be detected by +enables channels to provide at-least-once message delivery semantics. Possible duplicates can be detected by destinations by tracking message sequence numbers. Message sequence numbers are generated per sending processor. Depending on how a processor routes outbound messages to destinations, they may either see a contiguous message sequence or a sequence with gaps. @@ -229,14 +289,13 @@ sequence or a sequence with gaps. If a processor emits more than one outbound message per inbound ``Persistent`` message it **must** use a separate channel for each outbound message to ensure that confirmations are uniquely identifiable, otherwise, - at-least-once message delivery is not guaranteed. This rule has been introduced to avoid writing additional + at-least-once message delivery semantics do not apply. This rule has been introduced to avoid writing additional outbound message identifiers to the journal which would decrease the overall throughput. It is furthermore recommended to collapse multiple outbound messages to the same destination into a single outbound message, - otherwise, if sent via multiple channels, their ordering is not defined. These restrictions are likely to be - removed in the final release. + otherwise, if sent via multiple channels, their ordering is not defined. 
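To make the rule from the warning above concrete, the following sketch uses one channel per outbound message; it is an illustration only, and the destination paths are assumed to be supplied by the application::

    import akka.actor._
    import akka.persistence._

    // one channel per outbound message keeps confirmations uniquely identifiable
    class FanOutProcessor(destA: ActorPath, destB: ActorPath) extends Processor {
      val channelA = context.actorOf(Channel.props("channel-a"))
      val channelB = context.actorOf(Channel.props("channel-b"))

      def receive = {
        case p @ Persistent(payload, _) =>
          // two outbound messages per inbound Persistent message,
          // each delivered through its own channel
          channelA ! Deliver(p.withPayload(s"a-${payload}"), destA)
          channelB ! Deliver(p.withPayload(s"b-${payload}"), destB)
      }
    }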
-Whenever an application wants to have more control how sequence numbers are assigned to messages it should use -an application-specific sequence number generator and include the generated sequence numbers into the ``payload`` +If an application wants to have more control over how sequence numbers are assigned to messages it should use an +application-specific sequence number generator and include the generated sequence numbers in the ``payload`` of ``Persistent`` messages. Persistent channels ------------------- Channels created with ``Channel.props`` do not persist messages. These channels are usually used in combination with a sending processor that takes care of persistence, hence, channel-specific persistence is not necessary in this case. They are referred to as transient channels in the following. -Applications may also use transient channels standalone (i.e. without a sending processor) if re-delivery attempts -to destinations are required but message loss in case of a sender JVM crash is not an issue. If applications want to -use standalone channels but message loss is not acceptable, they should use persistent channels. A persistent channel -can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object. +Persistent channels are like transient channels but additionally persist messages before delivering them. Messages +that have been persisted by a persistent channel are deleted when destinations confirm their delivery. A persistent +channel can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object. .. includecode:: code/docs/persistence/PersistenceDocTest.java#persistent-channel-example -A persistent channel is like a transient channel that additionally persists ``Deliver`` requests before serving it. -Hence, it can recover from sender JVM crashes and provide the same message re-delivery semantics as a transient -channel in combination with an application-defined processor. +A persistent channel is useful for delivery of messages to slow destinations or destinations that are unavailable +for a long time. It can constrain the number of pending confirmations based on the ``pendingConfirmationsMax`` +and ``pendingConfirmationsMin`` parameters of ``PersistentChannelSettings``. -By default, a persistent channel doesn't reply whether a ``Persistent`` message, sent with ``Deliver``, has been -successfully persisted or not. This can be enabled by creating the channel with the ``replyPersistent`` configuration -parameter set to ``true``: +.. includecode:: code/docs/persistence/PersistenceDocTest.java#persistent-channel-watermarks + +It suspends delivery when the number of pending confirmations reaches ``pendingConfirmationsMax`` and resumes +delivery again when this number falls below ``pendingConfirmationsMin``. This prevents both flooding destinations +with more messages than they can process and unlimited memory consumption by the channel. A persistent channel +continues to persist new messages even when message delivery is temporarily suspended. + +Standalone usage +---------------- + +Applications may also use channels standalone. Transient channels can be used standalone if re-delivery attempts +to destinations are required but message loss in case of a sender JVM crash is not an issue. If message loss in +case of a sender JVM crash is an issue, persistent channels should be used. In this case, applications may want to +receive replies from the channel indicating whether messages have been successfully persisted or not.
This can be enabled by +creating the channel with the ``replyPersistent`` configuration parameter set to ``true``: .. includecode:: code/docs/persistence/PersistenceDocTest.java#persistent-channel-reply -With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``. -In case of a persistence failure, the sender should re-send the message. - -Using a persistent channel in combination with an application-defined processor can make sense if destinations are -unavailable for a long time and an application doesn't want to buffer all messages in memory (but write them to the -journal only). In this case, delivery can be disabled by sending the channel a ``DisableDelivery`` message (to -stop delivery and persist-only) and re-enabled again by sending it an ``EnableDelivery`` message. A disabled channel -that receives an ``EnableDelivery`` message, processes all persisted, unconfirmed ``Deliver`` requests again before -serving new ones. - -Sender resolution ----------------- - -``ActorRef`` s of ``Persistent`` message senders are also stored in the journal. Consequently, they may become invalid if -an application is restarted and messages are replayed. For example, the stored ``ActorRef`` may then reference -a previous incarnation of a sender and a new incarnation of that sender cannot receive a reply from a processor. -This may be acceptable for many applications but others may require that a new sender incarnation receives the -reply (to reliably resume a conversation between actors after a JVM crash, for example). Here, a channel may -assist in resolving new sender incarnations by specifying a third ``Deliver`` argument: - -* ``Resolve.destination()`` if the sender of a persistent message is used as channel destination - - .. includecode:: code/docs/persistence/PersistenceDocTest.java#resolve-destination - -* ``Resolve.sender()`` if the sender of a persistent message is forwarded to a destination. - - .. includecode:: code/docs/persistence/PersistenceDocTest.java#resolve-sender - -Default is ``Resolve.off()`` which means no resolution. Find out more in the ``Deliver`` API docs. +With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure`` +message. In the latter case, the sender should re-send the message. Identifiers ----------- -In the same way as :ref:`processors`, channels also have an identifier that defaults to a channel's path. A channel -identifier can therefore be customized by using a custom actor name at channel creation. This changes that channel's -name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize a channel -identifier, it should be provided as argument ``Channel.props(String)`` or ``PersistentChannel.props(String)``. +In the same way as :ref:`processors-java` and :ref:`views-java`, channels also have an identifier that defaults to a channel's +path. A channel identifier can therefore be customized by using a custom actor name at channel creation. This changes +that channel's name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize +a channel identifier, it should be provided as an argument to ``Channel.props(String)`` or ``PersistentChannel.props(String)`` +(this is the recommended way to generate stable identifiers). ..
includecode:: code/docs/persistence/PersistenceDocTest.java#channel-id-override @@ -326,16 +370,18 @@ Sequence number The sequence number of a ``Persistent`` message can be obtained via its ``sequenceNr`` method. Persistent messages are assigned sequence numbers on a per-processor basis (or per channel basis if used -standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes a message. +standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes messages. .. _snapshots-java: Snapshots ========= -Snapshots can dramatically reduce recovery times. Processors can save snapshots of internal state by calling the -``saveSnapshot`` method on ``Processor``. If saving of a snapshot succeeds, the processor will receive a -``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message. +Snapshots can dramatically reduce recovery times of processors and views. The following discusses snapshots +in the context of processors, but this is also applicable to views. + +Processors can save snapshots of internal state by calling the ``saveSnapshot`` method. If saving of a snapshot +succeeds, the processor receives a ``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message. .. includecode:: code/docs/persistence/PersistenceDocTest.java#save-snapshot @@ -359,9 +405,9 @@ saved snapshot matches the specified ``SnapshotSelectionCriteria`` will replay a Snapshot deletion ----------------- -A processor can delete a single snapshot by calling the ``deleteSnapshot`` method with the sequence number and the -timestamp of the snapshot as argument. To bulk-delete snapshots that match a specified ``SnapshotSelectionCriteria`` -argument, processors can call the ``deleteSnapshots`` method. +A processor can delete individual snapshots by calling the ``deleteSnapshot`` method with the sequence number and the +timestamp of a snapshot as argument. To bulk-delete snapshots matching ``SnapshotSelectionCriteria``, processors should +use the ``deleteSnapshots`` method. .. _event-sourcing-java: @@ -389,7 +435,7 @@ Akka persistence supports event sourcing with the abstract ``UntypedEventsourced event sourcing as a pattern on top of command sourcing). A processor that extends this abstract class does not handle ``Persistent`` messages directly but uses the ``persist`` method to persist and handle events. The behavior of an ``UntypedEventsourcedProcessor`` is defined by implementing ``onReceiveReplay`` and ``onReceiveCommand``. This is -best explained with an example (which is also part of ``akka-sample-persistence``). +demonstrated in the following example. .. includecode:: ../../../akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/EventsourcedExample.java#eventsourced-example @@ -402,28 +448,25 @@ a command is handled by generating two events which are then persisted and handl ``persist`` with an event (or a sequence of events) as first argument and an event handler as second argument. The ``persist`` method persists events asynchronously and the event handler is executed for successfully persisted -events. Successfully persisted events are internally sent back to the processor as separate messages which trigger -the event handler execution. An event handler may therefore close over processor state and mutate it. The sender -of a persisted event is the sender of the corresponding command. This allows event handlers to reply to the sender -of a command (not shown). +events.
Successfully persisted events are internally sent back to the processor as individual messages that trigger +event handler executions. An event handler may close over processor state and mutate it. The sender of a persisted +event is the sender of the corresponding command. This allows event handlers to reply to the sender of a command +(not shown). The main responsibility of an event handler is changing processor state using event data and notifying others about successful state changes by publishing events. -When persisting events with ``persist`` it is guaranteed that the processor will not receive new commands between +When persisting events with ``persist`` it is guaranteed that the processor will not receive further commands between the ``persist`` call and the execution(s) of the associated event handler. This also holds for multiple ``persist`` -calls in context of a single command. - -The example also demonstrates how to change the processor's default behavior, defined by ``onReceiveCommand``, to -another behavior, defined by ``otherCommandHandler``, and back using ``getContext().become()`` and -``getContext().unbecome()``. See also the API docs of ``persist`` for further details. +calls in the context of a single command. The example also shows how to switch between different command handlers +with ``getContext().become()`` and ``getContext().unbecome()``. Reliable event delivery ----------------------- -Sending events from an event handler to another actor directly doesn't guarantee delivery of these events. To -guarantee at-least-once delivery, :ref:`channels-java` must be used. In this case, also replayed events (received by -``receiveReplay``) must be sent to a channel, as shown in the following example: +Sending events from an event handler to another actor has at-most-once delivery semantics. For at-least-once delivery, +:ref:`channels-java` must be used. In this case, replayed events (received by ``onReceiveReplay``) must also be sent to a +channel, as shown in the following example: .. includecode:: code/docs/persistence/PersistenceDocTest.java#reliable-event-delivery @@ -438,29 +481,33 @@ To optimize throughput, an ``UntypedProcessor`` internally batches received ``Pe writing them to the journal (as a single batch). The batch size dynamically grows from 1 under low and moderate loads to a configurable maximum size (default is ``200``) under high load. -.. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#max-batch-size +.. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#max-message-batch-size A new batch write is triggered by a processor as soon as a batch reaches the maximum size or if the journal completed -writing the previous batch. Batch writes are never timer-based which keeps latencies as low as possible. +writing the previous batch. Batch writes are never timer-based, which keeps latencies at a minimum. Applications that want to have more explicit control over batch writes and batch sizes can send processors ``PersistentBatch`` messages. .. includecode:: code/docs/persistence/PersistenceDocTest.java#batch-write -``Persistent`` messages contained in a ``PersistentBatch`` message are always written atomically, even if the batch -size is greater than ``max-batch-size``. +``Persistent`` messages contained in a ``PersistentBatch`` are always written atomically, even if the batch +size is greater than ``max-message-batch-size``.
Also, a ``PersistentBatch`` is written in isolation from other batches. ``Persistent`` messages contained in a ``PersistentBatch`` are received individually by a processor. ``PersistentBatch`` messages, for example, are used internally by an ``UntypedEventsourcedProcessor`` to ensure atomic -writes of events. All events that are persisted in context of a single command are written as single batch to the +writes of events. All events that are persisted in the context of a single command are written as a single batch to the journal (even if ``persist`` is called multiple times per command). The recovery of an ``UntypedEventsourcedProcessor`` -will therefore never be done partially i.e. with only a subset of events persisted by a single command. +will therefore never be done partially (with only a subset of events persisted by a single command). + +Confirmation and deletion operations performed by :ref:`channels-java` are also batched. The maximum confirmation +and deletion batch sizes are configurable with ``akka.persistence.journal.max-confirmation-batch-size`` and +``akka.persistence.journal.max-deletion-batch-size``, respectively. Storage plugins =============== -Storage backends for journals and snapshot stores are plugins in akka-persistence. The default journal plugin +Storage backends for journals and snapshot stores are pluggable in Akka persistence. The default journal plugin writes messages to LevelDB (see :ref:`local-leveldb-journal-java`). The default snapshot store plugin writes snapshots as individual files to the local filesystem (see :ref:`local-snapshot-store-java`). Applications can provide their own plugins by implementing a plugin API and activate them by configuration. Plugin development requires the following @@ -472,19 +519,19 @@ Journal plugin API ------------------ A journal plugin either extends ``SyncWriteJournal`` or ``AsyncWriteJournal``. ``SyncWriteJournal`` is an -actor that should be extended when the storage backend API only supports synchronous, blocking writes. The -methods to be implemented in this case are: +actor that should be extended when the storage backend API only supports synchronous, blocking writes. In this +case, the methods to be implemented are: .. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/SyncWritePlugin.java#sync-write-plugin-api ``AsyncWriteJournal`` is an actor that should be extended if the storage backend API supports asynchronous, -non-blocking writes. The methods to be implemented in that case are: +non-blocking writes. In this case, the methods to be implemented are: .. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java#async-write-plugin-api -Message replays are always asynchronous, therefore, any journal plugin must implement: +Message replays and sequence number recovery are always asynchronous; therefore, any journal plugin must implement: -.. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncReplayPlugin.java#async-replay-plugin-api +.. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java#async-replay-plugin-api A journal plugin can be activated with the following minimal configuration: @@ -530,15 +577,15 @@ Shared LevelDB journal ---------------------- A LevelDB instance can also be shared by multiple actor systems (on the same or on different nodes).
This, for -example, allows processors to failover to a backup node, assuming that the node, where the shared instance is -runnning, is accessible from the backup node. +example, allows processors to fail over to a backup node and continue using the shared journal instance from the +backup node. .. warning:: A shared LevelDB instance is a single point of failure and should therefore only be used for testing - purposes. + purposes. Highly available, replicated journals are available as :ref:`community-projects-java`. -A shared LevelDB instance can be created by instantiating the ``SharedLeveldbStore`` actor. +A shared LevelDB instance is started by instantiating the ``SharedLeveldbStore`` actor. .. includecode:: code/docs/persistence/PersistencePluginDocTest.java#shared-store-creation @@ -565,12 +612,21 @@ i.e. only the first injection is used. Local snapshot store -------------------- -The default snapshot store plugin is ``akka.persistence.snapshot-store.local`` which writes snapshot files to +The default snapshot store plugin is ``akka.persistence.snapshot-store.local``. It writes snapshot files to the local filesystem. The default storage location is a directory named ``snapshots`` in the current working directory. This can be changed by configuration where the specified path can be relative or absolute: .. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#snapshot-config +.. _community-projects-java: + +Community plugins +----------------- + +* `Replicated journal backed by Apache Cassandra `_. +* `Replicated journal backed by Apache HBase `_. +* `Replicated journal backed by MongoDB `_. + Custom serialization ==================== @@ -584,8 +640,7 @@ it must add .. includecode:: ../scala/code/docs/persistence/PersistenceSerializerDocSpec.scala#custom-serializer-config -to the application configuration. If not specified, a default serializer is used, which is the ``JavaSerializer`` -in this example. +to the application configuration. If not specified, a default serializer is used. Testing ======= @@ -599,5 +654,4 @@ or .. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-native-config -in your Akka configuration. The latter setting applies if you're using a :ref:`shared-leveldb-journal-java`. The LevelDB -Java port is for testing purposes only. +in your Akka configuration. The LevelDB Java port is for testing purposes only. diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala index 2854db0e46..029f54591b 100644 --- a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala @@ -7,10 +7,20 @@ package docs.persistence import scala.concurrent.duration._ import scala.language.postfixOps -import akka.actor.ActorSystem +import akka.actor.{ Actor, ActorSystem } import akka.persistence._ trait PersistenceDocSpec { + val config = + """ + //#auto-update-interval + akka.persistence.view.auto-update-interval = 5s + //#auto-update-interval + //#auto-update + akka.persistence.view.auto-update = off + //#auto-update + """ + val system: ActorSystem import system._ @@ -110,7 +120,7 @@ trait PersistenceDocSpec { def receive = { case p @ Persistent(payload, _) => - channel !
Deliver(p.withPayload(s"processed ${payload}"), destination.path) } } @@ -124,8 +134,6 @@ trait PersistenceDocSpec { //#channel-example class MyProcessor2 extends Processor { - import akka.persistence.Resolve - val destination = context.actorOf(Props[MyDestination]) val channel = //#channel-id-override @@ -141,15 +149,21 @@ trait PersistenceDocSpec { def receive = { case p @ Persistent(payload, _) => //#channel-example-reply - channel ! Deliver(p.withPayload(s"processed ${payload}"), sender) - //#channel-example-reply - //#resolve-destination - channel ! Deliver(p, sender, Resolve.Destination) - //#resolve-destination - //#resolve-sender - channel forward Deliver(p, destination, Resolve.Sender) - //#resolve-sender + channel ! Deliver(p.withPayload(s"processed ${payload}"), sender.path) + //#channel-example-reply } + + //#channel-custom-listener + class MyListener extends Actor { + def receive = { + case RedeliverFailure(messages) => // ... + } + } + + val myListener = context.actorOf(Props[MyListener]) + val myChannel = context.actorOf(Channel.props( + ChannelSettings(redeliverFailureListener = Some(myListener)))) + //#channel-custom-listener } class MyProcessor3 extends Processor { @@ -254,9 +268,13 @@ trait PersistenceDocSpec { PersistentChannelSettings(redeliverInterval = 30 seconds, redeliverMax = 15)), name = "myPersistentChannel") - channel ! Deliver(Persistent("example"), destination) + channel ! Deliver(Persistent("example"), destination.path) //#persistent-channel-example - + //#persistent-channel-watermarks + PersistentChannelSettings( + pendingConfirmationsMax = 10000, + pendingConfirmationsMin = 2000) + //#persistent-channel-watermarks //#persistent-channel-reply PersistentChannelSettings(replyPersistent = true) //#persistent-channel-reply @@ -274,7 +292,7 @@ trait PersistenceDocSpec { // update state // ... // reliably deliver events - channel ! Deliver(Persistent(event), destination) + channel ! Deliver(Persistent(event), destination.path) } def receiveReplay: Receive = { @@ -290,4 +308,22 @@ trait PersistenceDocSpec { } //#reliable-event-delivery } + new AnyRef { + import akka.actor.Props + + //#view + class MyView extends View { + def processorId: String = "some-processor-id" + + def receive: Actor.Receive = { + case Persistent(payload, sequenceNr) => // ... + } + } + //#view + + //#view-update + val view = system.actorOf(Props[MyView]) + view ! Update(await = true) + //#view-update + } } diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala index f751294844..80ece71cf1 100644 --- a/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala @@ -23,9 +23,9 @@ import akka.persistence.snapshot._ object PersistencePluginDocSpec { val config = """ - //#max-batch-size - akka.persistence.journal.max-batch-size = 200 - //#max-batch-size + //#max-message-batch-size + akka.persistence.journal.max-message-batch-size = 200 + //#max-message-batch-size //#journal-config akka.persistence.journal.leveldb.dir = "target/journal" //#journal-config @@ -119,10 +119,12 @@ trait SharedLeveldbPluginDocSpec { } class MyJournal extends AsyncWriteJournal { - def writeAsync(persistentBatch: Seq[PersistentRepr]): Future[Unit] = ??? - def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit] = ??? 
- def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] = ??? - def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) => Unit): Future[Long] = ??? + def asyncWriteMessages(messages: Seq[PersistentRepr]): Future[Unit] = ??? + def asyncWriteConfirmations(confirmations: Seq[PersistentConfirmation]): Future[Unit] = ??? + def asyncDeleteMessages(messageIds: Seq[PersistentId], permanent: Boolean): Future[Unit] = ??? + def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] = ??? + def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) => Unit): Future[Unit] = ??? + def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = ??? } class MySnapshotStore extends SnapshotStore { diff --git a/akka-docs/rst/scala/persistence.rst b/akka-docs/rst/scala/persistence.rst index dd2f5db4d7..b2ff70d781 100644 --- a/akka-docs/rst/scala/persistence.rst +++ b/akka-docs/rst/scala/persistence.rst @@ -5,21 +5,13 @@ Persistence ########### Akka persistence enables stateful actors to persist their internal state so that it can be recovered when an actor -is started, restarted by a supervisor or migrated in a cluster. It also allows stateful actors to recover from JVM -crashes, for example. The key concept behind Akka persistence is that only changes to an actor's internal state are -persisted but never its current state directly (except for optional snapshots). These changes are only ever appended -to storage, nothing is ever mutated, which allows for very high transaction rates and efficient replication. Stateful -actors are recovered by replaying stored changes to these actors from which they can rebuild internal state. This can -be either the full history of changes or starting from a snapshot of internal actor state which can dramatically -reduce recovery times. Akka persistence also provides point-to-point communication channels with at-least-once -message delivery guarantees. - -Storage backends for state changes and snapshots are pluggable in Akka persistence. Currently, these are written to -the local filesystem. Distributed and replicated storage, with the possibility of scaling writes, will be available -soon. - -Akka persistence is inspired by the `eventsourced`_ library. It follows the same concepts and architecture of -`eventsourced`_ but significantly differs on API and implementation level. +is started, restarted after a JVM crash or by a supervisor, or migrated in a cluster. The key concept behind Akka +persistence is that only changes to an actor's internal state are persisted but never its current state directly +(except for optional snapshots). These changes are only ever appended to storage, nothing is ever mutated, which +allows for very high transaction rates and efficient replication. Stateful actors are recovered by replaying stored +changes to these actors from which they can rebuild internal state. This can be either the full history of changes +or starting from a snapshot which can dramatically reduce recovery times. Akka persistence also provides point-to-point +communication channels with at-least-once message delivery semantics. .. warning:: @@ -28,6 +20,9 @@ Akka persistence is inspired by the `eventsourced`_ library. 
It follows the same changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the contents of the ``akka.persistence`` package. +Akka persistence is inspired by, and is the official replacement of, the `eventsourced`_ library. It follows the same +concepts and architecture of `eventsourced`_ but significantly differs on API and implementation level. + .. _eventsourced: https://github.com/eligosource/eventsourced Dependencies @@ -44,16 +39,22 @@ Architecture before its ``receive`` method is called. When a processor is started or restarted, journaled messages are replayed to that processor, so that it can recover internal state from these messages. -* *Channel*: Channels are used by processors to communicate with other actors. They prevent that replayed messages - are redundantly delivered to these actors and provide at-least-once message delivery guarantees, also in case of - sender and receiver JVM crashes. +* *View*: A view is a persistent, stateful actor that receives journaled messages that have been written by another + processor. A view itself does not journal new messages; instead, it updates internal state only from a processor's + replicated message stream. + +* *Channel*: Channels are used by processors and views to communicate with other actors. They prevent replayed + messages from being redundantly delivered to these actors and provide at-least-once message delivery semantics, also in + case of sender and receiver JVM crashes. * *Journal*: A journal stores the sequence of messages sent to a processor. An application can control which messages - are stored and which are received by the processor without being journaled. The storage backend of a journal is - pluggable. + are journaled and which are received by the processor without being journaled. The storage backend of a journal is + pluggable. The default journal storage plugin writes to the local filesystem; replicated journals are available as + :ref:`community-projects`. -* *Snapshot store*: A snapshot store persists snapshots of a processor's internal state. Snapshots are used for - optimizing recovery times. The storage backend of a snapshot store is pluggable. +* *Snapshot store*: A snapshot store persists snapshots of a processor's or a view's internal state. Snapshots are + used for optimizing recovery times. The storage backend of a snapshot store is pluggable. The default snapshot + storage plugin writes to the local filesystem. * *Event sourcing*. Based on the building blocks described above, Akka persistence provides abstractions for the development of event sourced applications (see section :ref:`event-sourcing`) @@ -70,10 +71,9 @@ A processor can be implemented by extending the ``Processor`` trait and implemen Processors only write messages of type ``Persistent`` to the journal, others are received without being persisted. When a processor's ``receive`` method is called with a ``Persistent`` message it can safely assume that this message has been successfully written to the journal. If a journal fails to write a ``Persistent`` message then the processor -is stopped, by default. If an application wants that a processors continues to run on persistence failures it must -handle ``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure, -so that the sender can re-send the message, if needed, under the assumption that the journal recovered from a -temporary failure. +is stopped, by default.
If a processor should continue running on persistence failures it must handle +``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure, +so that the sender can re-send the message, if needed. A ``Processor`` itself is an ``Actor`` and can therefore be instantiated with ``actorOf``. @@ -84,7 +84,7 @@ Recovery By default, a processor is automatically recovered on start and on restart by replaying journaled messages. New messages sent to a processor during recovery do not interfere with replayed messages. New messages will -only be received by that processor after recovery completes. +only be received by a processor after recovery completes. Recovery customization ^^^^^^^^^^^^^^^^^^^^^^ @@ -132,7 +132,7 @@ that message as argument. An optional ``permanent`` parameter specifies whether deleted from the journal or only marked as deleted. In both cases, the message won't be replayed. Later extensions to Akka persistence will allow to replay messages that have been marked as deleted which can be useful for debugging purposes, for example. To delete all messages (journaled by a single processor) up to a specified sequence number, -processors can call the ``deleteMessages`` method. +processors should call the ``deleteMessages`` method. Identifiers ----------- @@ -145,41 +145,103 @@ method. Applications can customize a processor's id by specifying an actor name during processor creation as shown in section :ref:`processors`. This changes that processor's name in its actor hierarchy and hence influences only -part of the processor id. To fully customize a processor's id, the ``processorId`` method should be overridden. +part of the processor id. To fully customize a processor's id, the ``processorId`` method must be overridden. .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#processor-id-override +Overriding ``processorId`` is the recommended way to generate stable identifiers. + +.. _views: + +Views +===== + +Views can be implemented by extending the ``View`` trait and implementing the ``receive`` and the ``processorId`` +methods. + +.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#view + +The ``processorId`` identifies the processor from which the view receives journaled messages. It is not necessary +that the referenced processor is actually running. Views read messages from a processor's journal directly. When a +processor is started later and begins to write new messages, the corresponding view is updated automatically, by +default. + +Updates +------- + +The default update interval of all views of an actor system is configurable: + +.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#auto-update-interval + +``View`` implementation classes may also override the ``autoUpdateInterval`` method to return a custom update +interval for a specific view class or view instance. Applications may also trigger additional updates at +any time by sending a view an ``Update`` message. + +.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#view-update + +If the ``await`` parameter is set to ``true``, messages that follow the ``Update`` request are processed when the +incremental message replay, triggered by that update request, has completed. If set to ``false`` (default), messages +following the update request may interleave with the replayed message stream. Automated updates always run with +``await = false``. + +Automated updates of all views of an actor system can be turned off by configuration: ..
+Recovery
+--------
+
+Initial recovery of views works in the same way as for :ref:`processors` (i.e. by sending a ``Recover`` message
+to self). The maximum number of replayed messages during initial recovery is determined by ``autoUpdateReplayMax``.
+Further possibilities to customize initial recovery are explained in section :ref:`processors`.
+
+Identifiers
+-----------
+
+A view must have an identifier that doesn't change across different actor incarnations. It defaults to the
+``String`` representation of the actor path without the address part and can be obtained via the ``viewId``
+method.
+
+Applications can customize a view's id by specifying an actor name during view creation. This changes that view's
+name in its actor hierarchy and hence influences only part of the view id. To fully customize a view's id, the
+``viewId`` method must be overridden. Overriding ``viewId`` is the recommended way to generate stable identifiers.
+
+The ``viewId`` must differ from the referenced ``processorId``, unless :ref:`snapshots` of a view and its
+processor are to be shared (which applications usually do not want).
+
.. _channels:

Channels
========

-.. warning::
+Channels are special actors that are used by processors or views to communicate with other actors (channel
+destinations). The following discusses channels in the context of processors, but it applies to views as well.

-   There are further changes planned to the channel API that couldn't make it into the current milestone.
-   One example is to have only a single destination per channel to allow gap detection and more advanced
-   flow control.
-
-Channels are special actors that are used by processors to communicate with other actors (channel destinations).
Channels prevent redundant delivery of replayed messages to destinations during processor recovery. A replayed
-message is retained by a channel if its previous delivery has been confirmed by a destination.
+message is retained by a channel if its delivery has been confirmed by a destination.

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-example

A channel is ready to use once it has been created, no recovery or further activation is needed. A ``Deliver``
-request instructs a channel to send a ``Persistent`` message to a destination. Sender references are preserved
-by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request.
+request instructs a channel to send a ``Persistent`` message to a destination. A destination is provided as
+``ActorPath`` and messages are sent by the channel via that path's ``ActorSelection``. Sender references are
+preserved by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request.

-If a processor wants to reply to a ``Persistent`` message sender it should use the ``sender`` reference as channel
+If a processor wants to reply to a ``Persistent`` message sender it should use the ``sender`` path as channel
destination.

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-example-reply

Persistent messages delivered by a channel are of type ``ConfirmablePersistent``. ``ConfirmablePersistent`` extends
-``Persistent`` by adding the methods ``confirm`` method and ``redeliveries`` (see also :ref:`redelivery`). Channel
-destinations confirm the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` an that message.
-This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain these confirmation
-entries which allows a channel to decide if a message should be retained or not.
+``Persistent`` by adding the methods ``confirm`` and ``redeliveries`` (see also :ref:`redelivery`). A channel
+destination confirms the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` on that message.
+This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain confirmation
+entries which allow a channel to decide if it should retain these messages or not.
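A destination could therefore be implemented like this (a minimal sketch; the processing logic is omitted)::

   import akka.actor.Actor
   import akka.persistence.ConfirmablePersistent

   class MyDestination extends Actor {
     def receive = {
       case p @ ConfirmablePersistent(payload, sequenceNr, redeliveries) =>
         // ... process payload ...
         // then confirm the delivery, which asynchronously writes
         // a confirmation entry to the journal
         p.confirm()
     }
   }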
A ``Processor`` can also be used as channel destination i.e. it can persist ``ConfirmablePersistent`` messages too.

@@ -188,25 +250,23 @@ A ``Processor`` can also be used as channel destination i.e. it can persist ``Co
Message re-delivery
-------------------

-Channels re-deliver messages to destinations if they do not confirm their receipt within a configurable timeout.
+Channels re-deliver messages to destinations if they do not confirm delivery within a configurable timeout.
This timeout can be specified as ``redeliverInterval`` when creating a channel, optionally together with the
-maximum number of re-deliveries a channel should attempt for each unconfirmed message.
+maximum number of re-deliveries a channel should attempt for each unconfirmed message. The number of re-delivery
+attempts can be obtained via the ``redeliveries`` method on ``ConfirmablePersistent`` or by pattern matching.

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-custom-settings

-Message re-delivery is done out of order with regards to normal delivery i.e. redelivered messages may arrive
-later than newer normally delivered messages. The number of re-delivery attempts can be obtained via the
-``redeliveries`` method on ``ConfirmablePersistent`` or by pattern matching.
+A channel keeps messages in memory until their successful delivery has been confirmed or the maximum number of
+re-deliveries is reached. To be notified about messages that have reached the maximum number of re-deliveries,
+applications can register a listener at channel creation.

-A channel keeps messages in memory until their successful delivery has been confirmed by their destination(s)
-or their maximum number of re-deliveries is reached. In the latter case, the application has to re-send the
-correspnding ``Deliver`` request to the channel so that the channel can start a new series of delivery attempts
-(starting again with a ``redeliveries`` count of ``0``).
+.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-custom-listener

-Re-sending ``Deliver`` requests is done automatically if the sending processor replays messages: only ``Deliver``
-requests of unconfirmed messages will be served again by the channel. A message replay can be enforced by an
-application by restarting the sending processor, for example. A replay will also take place if the whole
-application is restarted, either after normal termination or after a crash.
+A listener receives ``RedeliverFailure`` notifications containing all messages that could not be delivered. On
+receiving a ``RedeliverFailure`` message, an application may decide to restart the sending processor to force
+a re-send of these messages to the channel, or confirm these messages to prevent further re-sends. The sending
+processor can also be restarted at any time later to re-send unconfirmed messages.

This combination of

@@ -215,7 +275,7 @@ This combination of

* message re-deliveries by channels and
* application-level confirmations (acknowledgements) by destinations

-enables channels to provide at-least-once message delivery guarantees. Possible duplicates can be detected by
+enables channels to provide at-least-once message delivery semantics. Possible duplicates can be detected by
destinations by tracking message sequence numbers. Message sequence numbers are generated per sending processor.
Depending on how a processor routes outbound messages to destinations, they may either see a contiguous message
sequence or a sequence with gaps.

@@ -224,14 +284,13 @@ sequence or a sequence with gaps.

  If a processor emits more than one outbound message per inbound ``Persistent`` message it **must** use a
  separate channel for each outbound message to ensure that confirmations are uniquely identifiable, otherwise,
-  at-least-once message delivery is not guaranteed. This rule has been introduced to avoid writing additional
+  at-least-once message delivery semantics do not apply. This rule has been introduced to avoid writing additional
  outbound message identifiers to the journal which would decrease the overall throughput. It is furthermore
  recommended to collapse multiple outbound messages to the same destination into a single outbound message,
-  otherwise, if sent via multiple channels, their ordering is not defined. These restrictions are likely to be
-  removed in the final release.
+  otherwise, if sent via multiple channels, their ordering is not defined.

-Whenever an application wants to have more control how sequence numbers are assigned to messages it should use
-an application-specific sequence number generator and include the generated sequence numbers into the ``payload``
+If an application wants to have more control over how sequence numbers are assigned to messages it should use an
+application-specific sequence number generator and include the generated sequence numbers into the ``payload``
of ``Persistent`` messages.

Persistent channels
===================

@@ -241,60 +300,45 @@ Channels created with ``Channel.props`` do not persist messages. These channels
with a sending processor that takes care of persistence, hence, channel-specific persistence is not necessary in
this case. They are referred to as transient channels in the following.

-Applications may also use transient channels standalone (i.e. without a sending processor) if re-delivery attempts
-to destinations are required but message loss in case of a sender JVM crash is not an issue. If applications want to
-use standalone channels but message loss is not acceptable, they should use persistent channels. A persistent channel
-can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object.
+Persistent channels are like transient channels but additionally persist messages before delivering them. Messages
+that have been persisted by a persistent channel are deleted when destinations confirm their delivery. A persistent
+channel can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object.

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-example
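For illustration, creating and using such a channel might look like this (a sketch that assumes it runs inside an
actor with a ``destination: ActorRef`` in scope; the settings values are arbitrary)::

   import scala.concurrent.duration._
   import akka.persistence._

   // inside an actor
   val channel = context.actorOf(PersistentChannel.props(
     PersistentChannelSettings(redeliverInterval = 30.seconds, redeliverMax = 15)),
     name = "myPersistentChannel")

   channel ! Deliver(Persistent("example"), destination.path)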
-A persistent channel is like a transient channel that additionally persists ``Deliver`` requests before serving it.
-Hence, it can recover from sender JVM crashes and provide the same message re-delivery semantics as a transient
-channel in combination with an application-defined processor.
+A persistent channel is useful for delivery of messages to slow destinations or destinations that are unavailable
+for a long time. It can constrain the number of pending confirmations based on the ``pendingConfirmationsMax``
+and ``pendingConfirmationsMin`` parameters of ``PersistentChannelSettings``.

-By default, a persistent channel doesn't reply whether a ``Persistent`` message, sent with ``Deliver``, has been
-successfully persisted or not. This can be enabled by creating the channel with the ``replyPersistent`` configuration
-parameter set to ``true``:
+.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-watermarks
+
+It suspends delivery when the number of pending confirmations reaches ``pendingConfirmationsMax`` and resumes
+delivery again when this number falls below ``pendingConfirmationsMin``. This prevents both flooding destinations
+with more messages than they can process and unlimited memory consumption by the channel. A persistent channel
+continues to persist new messages even when message delivery is temporarily suspended.
+
+Standalone usage
+----------------
+
+Applications may also use channels standalone. Transient channels can be used standalone if re-delivery attempts
+to destinations are required but message loss in case of a sender JVM crash is not an issue. If message loss in
+case of a sender JVM crash is an issue, persistent channels should be used. In this case, applications may want to
+receive replies from the channel indicating whether messages have been successfully persisted or not. This can be
+enabled by creating the channel with the ``replyPersistent`` configuration parameter set to ``true``:

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-reply

-With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``.
-In case of a persistence failure, the sender should re-send the message.
-
-Using a persistent channel in combination with an application-defined processor can make sense if destinations are
-unavailable for a long time and an application doesn't want to buffer all messages in memory (but write them to the
-journal only). In this case, delivery can be disabled by sending the channel a ``DisableDelivery`` message (to
-stop delivery and persist-only) and re-enabled again by sending it an ``EnableDelivery`` message. A disabled channel
-that receives an ``EnableDelivery`` message, processes all persisted, unconfirmed ``Deliver`` requests again before
-serving new ones.
-
-Sender resolution
------------------
-
-``ActorRef`` s of ``Persistent`` message senders are also stored in the journal. Consequently, they may become invalid if
-an application is restarted and messages are replayed. For example, the stored ``ActorRef`` may then reference
-a previous incarnation of a sender and a new incarnation of that sender cannot receive a reply from a processor.
-This may be acceptable for many applications but others may require that a new sender incarnation receives the
-reply (to reliably resume a conversation between actors after a JVM crash, for example). Here, a channel may
-assist in resolving new sender incarnations by specifying a third ``Deliver`` argument:
-
-* ``Resolve.Destination`` if the sender of a persistent message is used as channel destination
-
-  .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#resolve-destination
-
-* ``Resolve.Sender`` if the sender of a persistent message is forwarded to a destination.
-
-  .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#resolve-sender
-
-Default is ``Resolve.Off`` which means no resolution. Find out more in the ``Deliver`` API docs.
+With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``
+message. In the latter case, the sender should re-send the message.
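The sender's ``receive`` method might then handle both reply types as follows (an illustrative sketch of the
sending actor's behavior)::

   import akka.persistence.{ Persistent, PersistenceFailure }

   def receive = {
     case p: Persistent =>
       // message was successfully persisted by the channel
     case PersistenceFailure(payload, sequenceNr, cause) =>
       // message was not persisted and should be re-sent
   }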
Identifiers
-----------

-In the same way as :ref:`processors`, channels also have an identifier that defaults to a channel's path. A channel
-identifier can therefore be customized by using a custom actor name at channel creation. This changes that channel's
-name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize a channel
-identifier, it should be provided as argument ``Channel.props(String)`` or ``PersistentChannel.props(String)``.
+In the same way as :ref:`processors` and :ref:`views`, channels also have an identifier that defaults to a channel's
+path. A channel identifier can therefore be customized by using a custom actor name at channel creation. This changes
+that channel's name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize
+a channel identifier, it should be provided as an argument to ``Channel.props(String)`` or ``PersistentChannel.props(String)``
+(the recommended way to generate stable identifiers).

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-id-override

@@ -313,7 +357,7 @@ method or by pattern matching

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#payload-pattern-matching

Inside processors, new persistent messages are derived from the current persistent message before sending them via a
-channel, either by calling ``p.withPayload(...)`` or ``Persistent.create(...)`` where the latter uses the
+channel, either by calling ``p.withPayload(...)`` or ``Persistent(...)`` where the latter uses the
implicit ``currentPersistentMessage`` made available by ``Processor``.

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#current-message

@@ -333,16 +377,18 @@ method or by pattern matching

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#sequence-nr-pattern-matching

Persistent messages are assigned sequence numbers on a per-processor basis (or per channel basis if used
-standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes a message.
+standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes messages.
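The following sketch combines these aspects: it pattern-matches payload and sequence number and derives an outbound
message with ``withPayload`` (the channel and destination references are illustrative constructor parameters)::

   import akka.actor.ActorRef
   import akka.persistence.{ Deliver, Persistent, Processor }

   class MyProcessor(channel: ActorRef, destination: ActorRef) extends Processor {
     def receive = {
       case p @ Persistent(payload, sequenceNr) =>
         // withPayload copies all fields of `p` except the payload
         val out = p.withPayload(s"done $payload")
         channel ! Deliver(out, destination.path)
     }
   }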
.. _snapshots:

Snapshots
=========

-Snapshots can dramatically reduce recovery times. Processors can save snapshots of internal state by calling the
-``saveSnapshot`` method on ``Processor``. If saving of a snapshot succeeds, the processor will receive a
-``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message
+Snapshots can dramatically reduce recovery times of processors and views. The following discusses snapshots
+in the context of processors, but it applies to views as well.
+
+Processors can save snapshots of internal state by calling the ``saveSnapshot`` method. If saving of a snapshot
+succeeds, the processor receives a ``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message:

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#save-snapshot

@@ -370,9 +416,9 @@ saved snapshot matches the specified ``SnapshotSelectionCriteria`` will replay a
Snapshot deletion
-----------------

-A processor can delete a single snapshot by calling the ``deleteSnapshot`` method with the sequence number and the
-timestamp of the snapshot as argument. To bulk-delete snapshots that match a specified ``SnapshotSelectionCriteria``
-argument, processors can call the ``deleteSnapshots`` method.
+A processor can delete individual snapshots by calling the ``deleteSnapshot`` method with the sequence number and the
+timestamp of a snapshot as argument. To bulk-delete snapshots matching ``SnapshotSelectionCriteria``, processors should
+use the ``deleteSnapshots`` method.

.. _event-sourcing:

@@ -399,8 +445,7 @@ also process commands that do not change application state, such as query comman

Akka persistence supports event sourcing with the ``EventsourcedProcessor`` trait (which implements event sourcing
as a pattern on top of command sourcing). A processor that extends this trait does not handle ``Persistent`` messages
directly but uses the ``persist`` method to persist and handle events. The behavior of an ``EventsourcedProcessor``
-is defined by implementing ``receiveReplay`` and ``receiveCommand``. This is best explained with an example (which
-is also part of ``akka-sample-persistence``).
+is defined by implementing ``receiveReplay`` and ``receiveCommand``. This is demonstrated in the following example.

.. includecode:: ../../../akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala#eventsourced-example

@@ -413,28 +458,25 @@ a command is handled by generating two events which are then persisted and handl
``persist`` with an event (or a sequence of events) as first argument and an event handler as second argument.

The ``persist`` method persists events asynchronously and the event handler is executed for successfully persisted
-events. Successfully persisted events are internally sent back to the processor as separate messages which trigger
-the event handler execution. An event handler may therefore close over processor state and mutate it. The sender
-of a persisted event is the sender of the corresponding command. This allows event handlers to reply to the sender
-of a command (not shown).
+events. Successfully persisted events are internally sent back to the processor as individual messages that trigger
+event handler executions. An event handler may close over processor state and mutate it. The sender of a persisted
+event is the sender of the corresponding command. This allows event handlers to reply to the sender of a command
+(not shown).

The main responsibility of an event handler is changing processor state using event data and notifying others
about successful state changes by publishing events.

-When persisting events with ``persist`` it is guaranteed that the processor will not receive new commands between
+When persisting events with ``persist`` it is guaranteed that the processor will not receive further commands between
the ``persist`` call and the execution(s) of the associated event handler. This also holds for multiple ``persist``
-calls in context of a single command.
-
-The example also demonstrates how to change the processor's default behavior, defined by ``receiveCommand``, to
-another behavior, defined by ``otherCommandHandler``, and back using ``context.become()`` and ``context.unbecome()``.
-See also the API docs of ``persist`` for further details.
+calls in the context of a single command. The example also shows how to switch between different command handlers
+with ``context.become()`` and ``context.unbecome()``.
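A stripped-down variant (not the full sample referenced above) might look like this::

   import akka.persistence.EventsourcedProcessor

   class MyEventsourcedProcessor extends EventsourcedProcessor {
     private var events: List[String] = Nil

     def receiveReplay: Receive = {
       case evt: String => events = evt :: events
     }

     def receiveCommand: Receive = {
       case cmd: String =>
         persist(s"$cmd-evt") { evt =>
           // runs only for successfully persisted events and may
           // close over and mutate processor state
           events = evt :: events
         }
     }
   }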
Reliable event delivery
-----------------------

-Sending events from an event handler to another actor directly doesn't guarantee delivery of these events. To
-guarantee at-least-once delivery, :ref:`channels` must be used. In this case, also replayed events (received by
-``receiveReplay``) must be sent to a channel, as shown in the following example:
+Sending events from an event handler to another actor has at-most-once delivery semantics. For at-least-once delivery,
+:ref:`channels` must be used. In this case, replayed events (received by ``receiveReplay``) must also be sent to a
+channel, as shown in the following example:

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#reliable-event-delivery

@@ -449,29 +491,33 @@ To optimize throughput, a ``Processor`` internally batches received ``Persistent
writing them to the journal (as a single batch). The batch size dynamically grows from 1 under low and moderate
loads to a configurable maximum size (default is ``200``) under high load.

-.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#max-batch-size
+.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#max-message-batch-size

A new batch write is triggered by a processor as soon as a batch reaches the maximum size or if the journal completed
-writing the previous batch. Batch writes are never timer-based which keeps latencies as low as possible.
+writing the previous batch. Batch writes are never timer-based, which keeps latencies at a minimum.

Applications that want to have more explicit control over batch writes and batch sizes can send processors
``PersistentBatch`` messages.

.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#batch-write

-``Persistent`` messages contained in a ``PersistentBatch`` message are always written atomically, even if the batch
-size is greater than ``max-batch-size``. Also, a ``PersistentBatch`` is written isolated from other batches.
+``Persistent`` messages contained in a ``PersistentBatch`` are always written atomically, even if the batch
+size is greater than ``max-message-batch-size``. Also, a ``PersistentBatch`` is written in isolation from other batches.
``Persistent`` messages contained in a ``PersistentBatch`` are received individually by a processor.

``PersistentBatch`` messages, for example, are used internally by an ``EventsourcedProcessor`` to ensure atomic
writes of events. All events that are persisted in context of a single command are written as a single batch to the
journal (even if ``persist`` is called multiple times per command). The recovery of an ``EventsourcedProcessor``
-will therefore never be done partially i.e. with only a subset of events persisted by a single command.
+will therefore never be done partially (with only a subset of events persisted by a single command).
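An explicit batch write might then look like this (a sketch that assumes a ``processor: ActorRef``; the payloads
are arbitrary)::

   import akka.persistence.{ Persistent, PersistentBatch }

   // written atomically as a single batch, but received
   // individually by the processor
   processor ! PersistentBatch(List(Persistent("a"), Persistent("b")))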
+
+Confirmation and deletion operations performed by :ref:`channels` are also batched. The maximum confirmation
+and deletion batch sizes are configurable with ``akka.persistence.journal.max-confirmation-batch-size`` and
+``akka.persistence.journal.max-deletion-batch-size``, respectively.

Storage plugins
===============

-Storage backends for journals and snapshot stores are plugins in akka-persistence. The default journal plugin
+Storage backends for journals and snapshot stores are pluggable in Akka persistence. The default journal plugin
writes messages to LevelDB (see :ref:`local-leveldb-journal`). The default snapshot store plugin writes snapshots
as individual files to the local filesystem (see :ref:`local-snapshot-store`). Applications can provide their own
plugins by implementing a plugin API and activate them by configuration. Plugin development requires the following

@@ -483,19 +529,19 @@ Journal plugin API
------------------

A journal plugin either extends ``SyncWriteJournal`` or ``AsyncWriteJournal``. ``SyncWriteJournal`` is an
-actor that should be extended when the storage backend API only supports synchronous, blocking writes. The
-methods to be implemented in this case are:
+actor that should be extended when the storage backend API only supports synchronous, blocking writes. In this
+case, the methods to be implemented are:

.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala#journal-plugin-api

``AsyncWriteJournal`` is an actor that should be extended if the storage backend API supports asynchronous,
-non-blocking writes. The methods to be implemented in that case are:
+non-blocking writes. In this case, the methods to be implemented are:

.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala#journal-plugin-api

-Message replays are always asynchronous, therefore, any journal plugin must implement:
+Message replays and sequence number recovery are always asynchronous, therefore, any journal plugin must implement:

-.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncReplay.scala#journal-plugin-api
+.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala#journal-plugin-api

A journal plugin can be activated with the following minimal configuration:

@@ -542,15 +588,15 @@ Shared LevelDB journal
----------------------

A LevelDB instance can also be shared by multiple actor systems (on the same or on different nodes). This, for
-example, allows processors to failover to a backup node, assuming that the node, where the shared instance is
-runnning, is accessible from the backup node.
+example, allows processors to fail over to a backup node and continue using the shared journal instance from the
+backup node.

.. warning::

  A shared LevelDB instance is a single point of failure and should therefore only be used for testing
-  purposes.
+  purposes. Highly-available, replicated journals are available as :ref:`community-projects`.

-A shared LevelDB instance can be created by instantiating the ``SharedLeveldbStore`` actor.
+A shared LevelDB instance is started by instantiating the ``SharedLeveldbStore`` actor.

.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-creation

@@ -577,18 +623,20 @@ i.e. only the first injection is used.
Local snapshot store -------------------- -The default snapshot store plugin is ``akka.persistence.snapshot-store.local`` which writes snapshot files to +The default snapshot store plugin is ``akka.persistence.snapshot-store.local``. It writes snapshot files to the local filesystem. The default storage location is a directory named ``snapshots`` in the current working directory. This can be changed by configuration where the specified path can be relative or absolute: .. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#snapshot-config -Planned plugins ---------------- +.. _community-projects: -* Shared snapshot store (SPOF, for testing purposes) -* HA snapshot store backed by a distributed file system -* HA journal backed by a distributed (NoSQL) data store +Community plugins +----------------- + +* `Replicated journal backed by Apache Cassandra `_. +* `Replicated journal backed by Apache HBase `_. +* `Replicated journal backed by MongoDB `_. Custom serialization ==================== @@ -603,8 +651,7 @@ it must add .. includecode:: code/docs/persistence/PersistenceSerializerDocSpec.scala#custom-serializer-config -to the application configuration. If not specified, a default serializer is used, which is the ``JavaSerializer`` -in this example. +to the application configuration. If not specified, a default serializer is used. Testing ======= @@ -618,8 +665,7 @@ or .. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-native-config -in your Akka configuration. The latter setting applies if you're using a :ref:`shared-leveldb-journal`. The LevelDB -Java port is for testing purposes only. +in your Akka configuration. The LevelDB Java port is for testing purposes only. Miscellaneous ============= diff --git a/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncReplayPlugin.java b/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java similarity index 60% rename from akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncReplayPlugin.java rename to akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java index 099f277532..81fb48d5be 100644 --- a/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncReplayPlugin.java +++ b/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java @@ -9,16 +9,14 @@ import scala.concurrent.Future; import akka.japi.Procedure; import akka.persistence.PersistentRepr; -interface AsyncReplayPlugin { +interface AsyncRecoveryPlugin { //#async-replay-plugin-api /** * Java API, Plugin API: asynchronously replays persistent messages. * Implementations replay a message by calling `replayCallback`. The returned * future must be completed when all messages (matching the sequence number - * bounds) have been replayed. The future `Long` value must be the highest - * stored sequence number in the journal for the specified processor. The - * future must be completed with a failure if any of the persistent messages - * could not be replayed. + * bounds) have been replayed. The future must be completed with a failure if + * any of the persistent messages could not be replayed. * * The `replayCallback` must also be called with messages that have been marked * as deleted. In this case a replayed message's `deleted` method must return @@ -30,9 +28,20 @@ interface AsyncReplayPlugin { * @param processorId processor id. * @param fromSequenceNr sequence number where replay should start (inclusive). 
* @param toSequenceNr sequence number where replay should end (inclusive).
+   * @param max maximum number of messages to be replayed.
   * @param replayCallback called to replay a single message. Can be called from any
   *                       thread.
   */
-  Future<Long> doReplayAsync(String processorId, long fromSequenceNr, long toSequenceNr, Procedure<PersistentRepr> replayCallback);
+  Future<Void> doAsyncReplayMessages(String processorId, long fromSequenceNr, long toSequenceNr, long max, Procedure<PersistentRepr> replayCallback);
+
+  /**
+   * Java API, Plugin API: asynchronously reads the highest stored sequence number
+   * for the given `processorId`.
+   *
+   * @param processorId processor id.
+   * @param fromSequenceNr hint where to start searching for the highest sequence
+   *                       number.
+   */
+  Future<Long> doAsyncReadHighestSequenceNr(String processorId, long fromSequenceNr);
  //#async-replay-plugin-api
}
diff --git a/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java b/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java
index 350c9be3df..bdcbe1d070 100644
--- a/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java
+++ b/akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java
@@ -6,31 +6,37 @@ package akka.persistence.journal.japi;

import scala.concurrent.Future;

-import akka.persistence.PersistentRepr;
+import akka.persistence.*;

interface AsyncWritePlugin {
  //#async-write-plugin-api
  /**
   * Java API, Plugin API: asynchronously writes a batch of persistent messages to the
   * journal. The batch write must be atomic i.e. either all persistent messages in the
   * batch are written or none.
   */
-  Future<Void> doWriteAsync(Iterable<PersistentRepr> persistentBatch);
+  Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> messages);

  /**
-   * Java API, Plugin API: asynchronously deletes all persistent messages within the
-   * range from `fromSequenceNr` to `toSequenceNr`. If `permanent` is set to `false`,
-   * the persistent messages are marked as deleted, otherwise they are permanently
-   * deleted.
+   * Java API, Plugin API: asynchronously writes a batch of delivery confirmations to
+   * the journal.
+   */
+  Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations);
+
+  /**
+   * Java API, Plugin API: asynchronously deletes messages identified by `messageIds`
+   * from the journal. If `permanent` is set to `false`, the persistent messages are
+   * marked as deleted, otherwise they are permanently deleted.
+   */
+  Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent);
+
+  /**
+   * Java API, Plugin API: asynchronously deletes all persistent messages up to
+   * `toSequenceNr`. If `permanent` is set to `false`, the persistent messages are
+   * marked as deleted, otherwise they are permanently deleted.
   *
-   * @see AsyncReplayPlugin
+   * @see AsyncRecoveryPlugin
   */
-  Future<Void> doDeleteAsync(String processorId, long fromSequenceNr, long toSequenceNr, boolean permanent);
-
-  /**
-   * Java API, Plugin API: asynchronously writes a delivery confirmation to the
-   * journal.
- */ - Future doConfirmAsync(String processorId, long sequenceNr, String channelId); + Future doAsyncDeleteMessagesTo(String processorId, long toSequenceNr, boolean permanent); //#async-write-plugin-api } diff --git a/akka-persistence/src/main/java/akka/persistence/journal/japi/SyncWritePlugin.java b/akka-persistence/src/main/java/akka/persistence/journal/japi/SyncWritePlugin.java index f15da52f34..9e03e2d206 100644 --- a/akka-persistence/src/main/java/akka/persistence/journal/japi/SyncWritePlugin.java +++ b/akka-persistence/src/main/java/akka/persistence/journal/japi/SyncWritePlugin.java @@ -4,7 +4,7 @@ package akka.persistence.journal.japi; -import akka.persistence.PersistentRepr; +import akka.persistence.*; interface SyncWritePlugin { //#sync-write-plugin-api @@ -13,21 +13,28 @@ interface SyncWritePlugin { * journal. The batch write must be atomic i.e. either all persistent messages in the * batch are written or none. */ - void doWrite(Iterable persistentBatch); + void doWriteMessages(Iterable messages); /** - * Java API, Plugin API: synchronously deletes all persistent messages within the - * range from `fromSequenceNr` to `toSequenceNr`. If `permanent` is set to `false`, - * the persistent messages are marked as deleted, otherwise they are permanently - * deleted. + * Java API, Plugin API: synchronously writes a batch of delivery confirmations to + * the journal. + */ + void doWriteConfirmations(Iterable confirmations); + + /** + * Java API, Plugin API: synchronously deletes messages identified by `messageIds` + * from the journal. If `permanent` is set to `false`, the persistent messages are + * marked as deleted, otherwise they are permanently deleted. + */ + void doDeleteMessages(Iterable messageIds, boolean permanent); + + /** + * Java API, Plugin API: synchronously deletes all persistent messages up to + * `toSequenceNr`. If `permanent` is set to `false`, the persistent messages are + * marked as deleted, otherwise they are permanently deleted. * - * @see AsyncReplayPlugin + * @see AsyncRecoveryPlugin */ - void doDelete(String processorId, long fromSequenceNr, long toSequenceNr, boolean permanent); - - /** - * Java API, Plugin API: synchronously writes a delivery confirmation to the journal. 
- */ - void doConfirm(String processorId, long sequenceNr, String channelId) throws Exception; + void doDeleteMessagesTo(String processorId, long toSequenceNr, boolean permanent); //#sync-write-plugin-api } diff --git a/akka-persistence/src/main/java/akka/persistence/serialization/MessageFormats.java b/akka-persistence/src/main/java/akka/persistence/serialization/MessageFormats.java index 4ce1cda1c5..340c98e995 100644 --- a/akka-persistence/src/main/java/akka/persistence/serialization/MessageFormats.java +++ b/akka-persistence/src/main/java/akka/persistence/serialization/MessageFormats.java @@ -746,16 +746,6 @@ public final class MessageFormats { */ boolean getDeleted(); - // optional bool resolved = 5; - /** - * optional bool resolved = 5; - */ - boolean hasResolved(); - /** - * optional bool resolved = 5; - */ - boolean getResolved(); - // optional int32 redeliveries = 6; /** * optional int32 redeliveries = 6; @@ -796,19 +786,19 @@ public final class MessageFormats { */ boolean getConfirmable(); - // optional .ConfirmMessage confirmMessage = 9; + // optional .DeliveredMessage confirmMessage = 9; /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ boolean hasConfirmMessage(); /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - akka.persistence.serialization.MessageFormats.ConfirmMessage getConfirmMessage(); + akka.persistence.serialization.MessageFormats.DeliveredMessage getConfirmMessage(); /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder getConfirmMessageOrBuilder(); + akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder getConfirmMessageOrBuilder(); // optional string confirmTarget = 10; /** @@ -919,49 +909,44 @@ public final class MessageFormats { deleted_ = input.readBool(); break; } - case 40: { - bitField0_ |= 0x00000010; - resolved_ = input.readBool(); - break; - } case 48: { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000010; redeliveries_ = input.readInt32(); break; } case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { confirms_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000040; + mutable_bitField0_ |= 0x00000020; } confirms_.add(input.readBytes()); break; } case 64: { - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000020; confirmable_ = input.readBool(); break; } case 74: { - akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder subBuilder = null; - if (((bitField0_ & 0x00000080) == 0x00000080)) { + akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { subBuilder = confirmMessage_.toBuilder(); } - confirmMessage_ = input.readMessage(akka.persistence.serialization.MessageFormats.ConfirmMessage.PARSER, extensionRegistry); + confirmMessage_ = input.readMessage(akka.persistence.serialization.MessageFormats.DeliveredMessage.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(confirmMessage_); confirmMessage_ = subBuilder.buildPartial(); } - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000040; break; } case 82: { - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000080; confirmTarget_ = input.readBytes(); break; } case 90: { - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000100; sender_ = 
input.readBytes(); break; } @@ -973,7 +958,7 @@ public final class MessageFormats { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { confirms_ = new com.google.protobuf.UnmodifiableLazyStringList(confirms_); } this.unknownFields = unknownFields.build(); @@ -1105,22 +1090,6 @@ public final class MessageFormats { return deleted_; } - // optional bool resolved = 5; - public static final int RESOLVED_FIELD_NUMBER = 5; - private boolean resolved_; - /** - * optional bool resolved = 5; - */ - public boolean hasResolved() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool resolved = 5; - */ - public boolean getResolved() { - return resolved_; - } - // optional int32 redeliveries = 6; public static final int REDELIVERIES_FIELD_NUMBER = 6; private int redeliveries_; @@ -1128,7 +1097,7 @@ public final class MessageFormats { * optional int32 redeliveries = 6; */ public boolean hasRedeliveries() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional int32 redeliveries = 6; @@ -1174,7 +1143,7 @@ public final class MessageFormats { * optional bool confirmable = 8; */ public boolean hasConfirmable() { - return ((bitField0_ & 0x00000040) == 0x00000040); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional bool confirmable = 8; @@ -1183,25 +1152,25 @@ public final class MessageFormats { return confirmable_; } - // optional .ConfirmMessage confirmMessage = 9; + // optional .DeliveredMessage confirmMessage = 9; public static final int CONFIRMMESSAGE_FIELD_NUMBER = 9; - private akka.persistence.serialization.MessageFormats.ConfirmMessage confirmMessage_; + private akka.persistence.serialization.MessageFormats.DeliveredMessage confirmMessage_; /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ public boolean hasConfirmMessage() { - return ((bitField0_ & 0x00000080) == 0x00000080); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public akka.persistence.serialization.MessageFormats.ConfirmMessage getConfirmMessage() { + public akka.persistence.serialization.MessageFormats.DeliveredMessage getConfirmMessage() { return confirmMessage_; } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder getConfirmMessageOrBuilder() { + public akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder getConfirmMessageOrBuilder() { return confirmMessage_; } @@ -1212,7 +1181,7 @@ public final class MessageFormats { * optional string confirmTarget = 10; */ public boolean hasConfirmTarget() { - return ((bitField0_ & 0x00000100) == 0x00000100); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional string confirmTarget = 10; @@ -1255,7 +1224,7 @@ public final class MessageFormats { * optional string sender = 11; */ public boolean hasSender() { - return ((bitField0_ & 0x00000200) == 0x00000200); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional string sender = 11; @@ -1296,11 +1265,10 @@ public final class MessageFormats { sequenceNr_ = 0L; processorId_ = ""; deleted_ = false; - resolved_ = 
false; redeliveries_ = 0; confirms_ = com.google.protobuf.LazyStringArrayList.EMPTY; confirmable_ = false; - confirmMessage_ = akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance(); + confirmMessage_ = akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance(); confirmTarget_ = ""; sender_ = ""; } @@ -1335,24 +1303,21 @@ public final class MessageFormats { output.writeBool(4, deleted_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, resolved_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeInt32(6, redeliveries_); } for (int i = 0; i < confirms_.size(); i++) { output.writeBytes(7, confirms_.getByteString(i)); } - if (((bitField0_ & 0x00000040) == 0x00000040)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBool(8, confirmable_); } - if (((bitField0_ & 0x00000080) == 0x00000080)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeMessage(9, confirmMessage_); } - if (((bitField0_ & 0x00000100) == 0x00000100)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeBytes(10, getConfirmTargetBytes()); } - if (((bitField0_ & 0x00000200) == 0x00000200)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeBytes(11, getSenderBytes()); } getUnknownFields().writeTo(output); @@ -1381,10 +1346,6 @@ public final class MessageFormats { .computeBoolSize(4, deleted_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, resolved_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(6, redeliveries_); } @@ -1397,19 +1358,19 @@ public final class MessageFormats { size += dataSize; size += 1 * getConfirmsList().size(); } - if (((bitField0_ & 0x00000040) == 0x00000040)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(8, confirmable_); } - if (((bitField0_ & 0x00000080) == 0x00000080)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(9, confirmMessage_); } - if (((bitField0_ & 0x00000100) == 0x00000100)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(10, getConfirmTargetBytes()); } - if (((bitField0_ & 0x00000200) == 0x00000200)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(11, getSenderBytes()); } @@ -1543,24 +1504,22 @@ public final class MessageFormats { bitField0_ = (bitField0_ & ~0x00000004); deleted_ = false; bitField0_ = (bitField0_ & ~0x00000008); - resolved_ = false; - bitField0_ = (bitField0_ & ~0x00000010); redeliveries_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000010); confirms_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000020); confirmable_ = false; - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000040); if (confirmMessageBuilder_ == null) { - confirmMessage_ = akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance(); + confirmMessage_ = akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance(); } else { confirmMessageBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000080); confirmTarget_ 
= ""; - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000100); sender_ = ""; - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000200); return this; } @@ -1612,35 +1571,31 @@ public final class MessageFormats { if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } - result.resolved_ = resolved_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } result.redeliveries_ = redeliveries_; - if (((bitField0_ & 0x00000040) == 0x00000040)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { confirms_ = new com.google.protobuf.UnmodifiableLazyStringList( confirms_); - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000020); } result.confirms_ = confirms_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000020; } result.confirmable_ = confirmable_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000080; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; } if (confirmMessageBuilder_ == null) { result.confirmMessage_ = confirmMessage_; } else { result.confirmMessage_ = confirmMessageBuilder_.build(); } - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000100; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; } result.confirmTarget_ = confirmTarget_; - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000200; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; } result.sender_ = sender_; result.bitField0_ = to_bitField0_; @@ -1673,16 +1628,13 @@ public final class MessageFormats { if (other.hasDeleted()) { setDeleted(other.getDeleted()); } - if (other.hasResolved()) { - setResolved(other.getResolved()); - } if (other.hasRedeliveries()) { setRedeliveries(other.getRedeliveries()); } if (!other.confirms_.isEmpty()) { if (confirms_.isEmpty()) { confirms_ = other.confirms_; - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000020); } else { ensureConfirmsIsMutable(); confirms_.addAll(other.confirms_); @@ -1696,12 +1648,12 @@ public final class MessageFormats { mergeConfirmMessage(other.getConfirmMessage()); } if (other.hasConfirmTarget()) { - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000100; confirmTarget_ = other.confirmTarget_; onChanged(); } if (other.hasSender()) { - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000200; sender_ = other.sender_; onChanged(); } @@ -1995,46 +1947,13 @@ public final class MessageFormats { return this; } - // optional bool resolved = 5; - private boolean resolved_ ; - /** - * optional bool resolved = 5; - */ - public boolean hasResolved() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool resolved = 5; - */ - public boolean getResolved() { - return resolved_; - } - /** - * optional bool resolved = 5; - */ - public Builder setResolved(boolean value) { - bitField0_ |= 0x00000010; - resolved_ = value; - onChanged(); - return this; - } - /** - * optional bool resolved = 5; - */ - public Builder clearResolved() { - bitField0_ = (bitField0_ & ~0x00000010); - resolved_ = false; - onChanged(); - return this; - } - // optional int32 redeliveries = 6; private int redeliveries_ ; /** * optional int32 redeliveries = 6; */ public boolean hasRedeliveries() { - return 
((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional int32 redeliveries = 6; @@ -2046,7 +1965,7 @@ public final class MessageFormats { * optional int32 redeliveries = 6; */ public Builder setRedeliveries(int value) { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000010; redeliveries_ = value; onChanged(); return this; @@ -2055,7 +1974,7 @@ public final class MessageFormats { * optional int32 redeliveries = 6; */ public Builder clearRedeliveries() { - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000010); redeliveries_ = 0; onChanged(); return this; @@ -2064,9 +1983,9 @@ public final class MessageFormats { // repeated string confirms = 7; private com.google.protobuf.LazyStringList confirms_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureConfirmsIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { confirms_ = new com.google.protobuf.LazyStringArrayList(confirms_); - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000020; } } /** @@ -2136,7 +2055,7 @@ public final class MessageFormats { */ public Builder clearConfirms() { confirms_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } @@ -2160,7 +2079,7 @@ public final class MessageFormats { * optional bool confirmable = 8; */ public boolean hasConfirmable() { - return ((bitField0_ & 0x00000080) == 0x00000080); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional bool confirmable = 8; @@ -2172,7 +2091,7 @@ public final class MessageFormats { * optional bool confirmable = 8; */ public Builder setConfirmable(boolean value) { - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000040; confirmable_ = value; onChanged(); return this; @@ -2181,26 +2100,26 @@ public final class MessageFormats { * optional bool confirmable = 8; */ public Builder clearConfirmable() { - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000040); confirmable_ = false; onChanged(); return this; } - // optional .ConfirmMessage confirmMessage = 9; - private akka.persistence.serialization.MessageFormats.ConfirmMessage confirmMessage_ = akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance(); + // optional .DeliveredMessage confirmMessage = 9; + private akka.persistence.serialization.MessageFormats.DeliveredMessage confirmMessage_ = akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - akka.persistence.serialization.MessageFormats.ConfirmMessage, akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder, akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder> confirmMessageBuilder_; + akka.persistence.serialization.MessageFormats.DeliveredMessage, akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder, akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder> confirmMessageBuilder_; /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ public boolean hasConfirmMessage() { - return ((bitField0_ & 0x00000100) == 0x00000100); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public akka.persistence.serialization.MessageFormats.ConfirmMessage 
getConfirmMessage() { + public akka.persistence.serialization.MessageFormats.DeliveredMessage getConfirmMessage() { if (confirmMessageBuilder_ == null) { return confirmMessage_; } else { @@ -2208,9 +2127,9 @@ public final class MessageFormats { } } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public Builder setConfirmMessage(akka.persistence.serialization.MessageFormats.ConfirmMessage value) { + public Builder setConfirmMessage(akka.persistence.serialization.MessageFormats.DeliveredMessage value) { if (confirmMessageBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -2220,32 +2139,32 @@ public final class MessageFormats { } else { confirmMessageBuilder_.setMessage(value); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000080; return this; } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ public Builder setConfirmMessage( - akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder builderForValue) { + akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder builderForValue) { if (confirmMessageBuilder_ == null) { confirmMessage_ = builderForValue.build(); onChanged(); } else { confirmMessageBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000080; return this; } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public Builder mergeConfirmMessage(akka.persistence.serialization.MessageFormats.ConfirmMessage value) { + public Builder mergeConfirmMessage(akka.persistence.serialization.MessageFormats.DeliveredMessage value) { if (confirmMessageBuilder_ == null) { - if (((bitField0_ & 0x00000100) == 0x00000100) && - confirmMessage_ != akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance()) { + if (((bitField0_ & 0x00000080) == 0x00000080) && + confirmMessage_ != akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance()) { confirmMessage_ = - akka.persistence.serialization.MessageFormats.ConfirmMessage.newBuilder(confirmMessage_).mergeFrom(value).buildPartial(); + akka.persistence.serialization.MessageFormats.DeliveredMessage.newBuilder(confirmMessage_).mergeFrom(value).buildPartial(); } else { confirmMessage_ = value; } @@ -2253,34 +2172,34 @@ public final class MessageFormats { } else { confirmMessageBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000080; return this; } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ public Builder clearConfirmMessage() { if (confirmMessageBuilder_ == null) { - confirmMessage_ = akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance(); + confirmMessage_ = akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance(); onChanged(); } else { confirmMessageBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000080); return this; } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder getConfirmMessageBuilder() { - bitField0_ |= 0x00000100; + public akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder getConfirmMessageBuilder() { + bitField0_ |= 0x00000080; onChanged(); return 
getConfirmMessageFieldBuilder().getBuilder(); } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ - public akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder getConfirmMessageOrBuilder() { + public akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder getConfirmMessageOrBuilder() { if (confirmMessageBuilder_ != null) { return confirmMessageBuilder_.getMessageOrBuilder(); } else { @@ -2288,14 +2207,14 @@ public final class MessageFormats { } } /** - * optional .ConfirmMessage confirmMessage = 9; + * optional .DeliveredMessage confirmMessage = 9; */ private com.google.protobuf.SingleFieldBuilder< - akka.persistence.serialization.MessageFormats.ConfirmMessage, akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder, akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder> + akka.persistence.serialization.MessageFormats.DeliveredMessage, akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder, akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder> getConfirmMessageFieldBuilder() { if (confirmMessageBuilder_ == null) { confirmMessageBuilder_ = new com.google.protobuf.SingleFieldBuilder< - akka.persistence.serialization.MessageFormats.ConfirmMessage, akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder, akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder>( + akka.persistence.serialization.MessageFormats.DeliveredMessage, akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder, akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder>( confirmMessage_, getParentForChildren(), isClean()); @@ -2310,7 +2229,7 @@ public final class MessageFormats { * optional string confirmTarget = 10; */ public boolean hasConfirmTarget() { - return ((bitField0_ & 0x00000200) == 0x00000200); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional string confirmTarget = 10; @@ -2350,7 +2269,7 @@ public final class MessageFormats { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000100; confirmTarget_ = value; onChanged(); return this; @@ -2359,7 +2278,7 @@ public final class MessageFormats { * optional string confirmTarget = 10; */ public Builder clearConfirmTarget() { - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000100); confirmTarget_ = getDefaultInstance().getConfirmTarget(); onChanged(); return this; @@ -2372,7 +2291,7 @@ public final class MessageFormats { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000100; confirmTarget_ = value; onChanged(); return this; @@ -2384,7 +2303,7 @@ public final class MessageFormats { * optional string sender = 11; */ public boolean hasSender() { - return ((bitField0_ & 0x00000400) == 0x00000400); + return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional string sender = 11; @@ -2424,7 +2343,7 @@ public final class MessageFormats { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000200; sender_ = value; onChanged(); return this; @@ -2433,7 +2352,7 @@ public final class MessageFormats { * optional string sender = 11; */ public Builder clearSender() { - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000200); sender_ = getDefaultInstance().getSender(); onChanged(); return this; @@ -2446,7 +2365,7 @@ public final class 
MessageFormats { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000200; sender_ = value; onChanged(); return this; @@ -3043,7 +2962,7 @@ public final class MessageFormats { // @@protoc_insertion_point(class_scope:PersistentPayload) } - public interface ConfirmMessageOrBuilder + public interface DeliveredMessageOrBuilder extends com.google.protobuf.MessageOrBuilder { // optional string processorId = 1; @@ -3061,75 +2980,75 @@ public final class MessageFormats { com.google.protobuf.ByteString getProcessorIdBytes(); - // optional int64 messageSequenceNr = 2; + // optional string channelId = 2; /** - * optional int64 messageSequenceNr = 2; - */ - boolean hasMessageSequenceNr(); - /** - * optional int64 messageSequenceNr = 2; - */ - long getMessageSequenceNr(); - - // optional string channelId = 3; - /** - * optional string channelId = 3; + * optional string channelId = 2; */ boolean hasChannelId(); /** - * optional string channelId = 3; + * optional string channelId = 2; */ java.lang.String getChannelId(); /** - * optional string channelId = 3; + * optional string channelId = 2; */ com.google.protobuf.ByteString getChannelIdBytes(); - // optional int64 wrapperSequenceNr = 4; + // optional int64 persistentSequenceNr = 3; /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - boolean hasWrapperSequenceNr(); + boolean hasPersistentSequenceNr(); /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - long getWrapperSequenceNr(); + long getPersistentSequenceNr(); - // optional string channelEndpoint = 5; + // optional int64 deliverySequenceNr = 4; /** - * optional string channelEndpoint = 5; + * optional int64 deliverySequenceNr = 4; */ - boolean hasChannelEndpoint(); + boolean hasDeliverySequenceNr(); /** - * optional string channelEndpoint = 5; + * optional int64 deliverySequenceNr = 4; */ - java.lang.String getChannelEndpoint(); + long getDeliverySequenceNr(); + + // optional string channel = 5; /** - * optional string channelEndpoint = 5; + * optional string channel = 5; + */ + boolean hasChannel(); + /** + * optional string channel = 5; + */ + java.lang.String getChannel(); + /** + * optional string channel = 5; */ com.google.protobuf.ByteString - getChannelEndpointBytes(); + getChannelBytes(); } /** - * Protobuf type {@code ConfirmMessage} + * Protobuf type {@code DeliveredMessage} */ - public static final class ConfirmMessage extends + public static final class DeliveredMessage extends com.google.protobuf.GeneratedMessage - implements ConfirmMessageOrBuilder { - // Use ConfirmMessage.newBuilder() to construct. - private ConfirmMessage(com.google.protobuf.GeneratedMessage.Builder builder) { + implements DeliveredMessageOrBuilder { + // Use DeliveredMessage.newBuilder() to construct. 
+ private DeliveredMessage(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ConfirmMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private DeliveredMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ConfirmMessage defaultInstance; - public static ConfirmMessage getDefaultInstance() { + private static final DeliveredMessage defaultInstance; + public static DeliveredMessage getDefaultInstance() { return defaultInstance; } - public ConfirmMessage getDefaultInstanceForType() { + public DeliveredMessage getDefaultInstanceForType() { return defaultInstance; } @@ -3139,7 +3058,7 @@ public final class MessageFormats { getUnknownFields() { return this.unknownFields; } - private ConfirmMessage( + private DeliveredMessage( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3167,24 +3086,24 @@ public final class MessageFormats { processorId_ = input.readBytes(); break; } - case 16: { + case 18: { bitField0_ |= 0x00000002; - messageSequenceNr_ = input.readInt64(); + channelId_ = input.readBytes(); break; } - case 26: { + case 24: { bitField0_ |= 0x00000004; - channelId_ = input.readBytes(); + persistentSequenceNr_ = input.readInt64(); break; } case 32: { bitField0_ |= 0x00000008; - wrapperSequenceNr_ = input.readInt64(); + deliverySequenceNr_ = input.readInt64(); break; } case 42: { bitField0_ |= 0x00000010; - channelEndpoint_ = input.readBytes(); + channel_ = input.readBytes(); break; } } @@ -3201,28 +3120,28 @@ public final class MessageFormats { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return akka.persistence.serialization.MessageFormats.internal_static_ConfirmMessage_descriptor; + return akka.persistence.serialization.MessageFormats.internal_static_DeliveredMessage_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return akka.persistence.serialization.MessageFormats.internal_static_ConfirmMessage_fieldAccessorTable + return akka.persistence.serialization.MessageFormats.internal_static_DeliveredMessage_fieldAccessorTable .ensureFieldAccessorsInitialized( - akka.persistence.serialization.MessageFormats.ConfirmMessage.class, akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder.class); + akka.persistence.serialization.MessageFormats.DeliveredMessage.class, akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ConfirmMessage parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DeliveredMessage parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ConfirmMessage(input, extensionRegistry); + return new DeliveredMessage(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -3270,33 +3189,17 @@ public final class MessageFormats { } } - // optional int64 messageSequenceNr = 2; - 
public static final int MESSAGESEQUENCENR_FIELD_NUMBER = 2; - private long messageSequenceNr_; + // optional string channelId = 2; + public static final int CHANNELID_FIELD_NUMBER = 2; + private java.lang.Object channelId_; /** - * optional int64 messageSequenceNr = 2; + * optional string channelId = 2; */ - public boolean hasMessageSequenceNr() { + public boolean hasChannelId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional int64 messageSequenceNr = 2; - */ - public long getMessageSequenceNr() { - return messageSequenceNr_; - } - - // optional string channelId = 3; - public static final int CHANNELID_FIELD_NUMBER = 3; - private java.lang.Object channelId_; - /** - * optional string channelId = 3; - */ - public boolean hasChannelId() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string channelId = 3; + * optional string channelId = 2; */ public java.lang.String getChannelId() { java.lang.Object ref = channelId_; @@ -3313,7 +3216,7 @@ public final class MessageFormats { } } /** - * optional string channelId = 3; + * optional string channelId = 2; */ public com.google.protobuf.ByteString getChannelIdBytes() { @@ -3329,36 +3232,52 @@ public final class MessageFormats { } } - // optional int64 wrapperSequenceNr = 4; - public static final int WRAPPERSEQUENCENR_FIELD_NUMBER = 4; - private long wrapperSequenceNr_; + // optional int64 persistentSequenceNr = 3; + public static final int PERSISTENTSEQUENCENR_FIELD_NUMBER = 3; + private long persistentSequenceNr_; /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - public boolean hasWrapperSequenceNr() { + public boolean hasPersistentSequenceNr() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 persistentSequenceNr = 3; + */ + public long getPersistentSequenceNr() { + return persistentSequenceNr_; + } + + // optional int64 deliverySequenceNr = 4; + public static final int DELIVERYSEQUENCENR_FIELD_NUMBER = 4; + private long deliverySequenceNr_; + /** + * optional int64 deliverySequenceNr = 4; + */ + public boolean hasDeliverySequenceNr() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 deliverySequenceNr = 4; */ - public long getWrapperSequenceNr() { - return wrapperSequenceNr_; + public long getDeliverySequenceNr() { + return deliverySequenceNr_; } - // optional string channelEndpoint = 5; - public static final int CHANNELENDPOINT_FIELD_NUMBER = 5; - private java.lang.Object channelEndpoint_; + // optional string channel = 5; + public static final int CHANNEL_FIELD_NUMBER = 5; + private java.lang.Object channel_; /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ - public boolean hasChannelEndpoint() { + public boolean hasChannel() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ - public java.lang.String getChannelEndpoint() { - java.lang.Object ref = channelEndpoint_; + public java.lang.String getChannel() { + java.lang.Object ref = channel_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -3366,22 +3285,22 @@ public final class MessageFormats { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - channelEndpoint_ = s; + channel_ = s; } return s; } } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ public 
com.google.protobuf.ByteString - getChannelEndpointBytes() { - java.lang.Object ref = channelEndpoint_; + getChannelBytes() { + java.lang.Object ref = channel_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - channelEndpoint_ = b; + channel_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -3390,10 +3309,10 @@ public final class MessageFormats { private void initFields() { processorId_ = ""; - messageSequenceNr_ = 0L; channelId_ = ""; - wrapperSequenceNr_ = 0L; - channelEndpoint_ = ""; + persistentSequenceNr_ = 0L; + deliverySequenceNr_ = 0L; + channel_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3411,16 +3330,16 @@ public final class MessageFormats { output.writeBytes(1, getProcessorIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, messageSequenceNr_); + output.writeBytes(2, getChannelIdBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getChannelIdBytes()); + output.writeInt64(3, persistentSequenceNr_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(4, wrapperSequenceNr_); + output.writeInt64(4, deliverySequenceNr_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, getChannelEndpointBytes()); + output.writeBytes(5, getChannelBytes()); } getUnknownFields().writeTo(output); } @@ -3437,19 +3356,19 @@ public final class MessageFormats { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, messageSequenceNr_); + .computeBytesSize(2, getChannelIdBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getChannelIdBytes()); + .computeInt64Size(3, persistentSequenceNr_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, wrapperSequenceNr_); + .computeInt64Size(4, deliverySequenceNr_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, getChannelEndpointBytes()); + .computeBytesSize(5, getChannelBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3463,53 +3382,53 @@ public final class MessageFormats { return super.writeReplace(); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom( + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom( + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom(byte[] data) + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom( + public 
static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom(java.io.InputStream input) + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom( + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseDelimitedFrom(java.io.InputStream input) + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseDelimitedFrom( + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom( + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static akka.persistence.serialization.MessageFormats.ConfirmMessage parseFrom( + public static akka.persistence.serialization.MessageFormats.DeliveredMessage parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3518,7 +3437,7 @@ public final class MessageFormats { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(akka.persistence.serialization.MessageFormats.ConfirmMessage prototype) { + public static Builder newBuilder(akka.persistence.serialization.MessageFormats.DeliveredMessage prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -3530,24 +3449,24 @@ public final class MessageFormats { return builder; } /** - * Protobuf type {@code ConfirmMessage} + * Protobuf type {@code DeliveredMessage} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements akka.persistence.serialization.MessageFormats.ConfirmMessageOrBuilder { + implements akka.persistence.serialization.MessageFormats.DeliveredMessageOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return akka.persistence.serialization.MessageFormats.internal_static_ConfirmMessage_descriptor; + return akka.persistence.serialization.MessageFormats.internal_static_DeliveredMessage_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
akka.persistence.serialization.MessageFormats.internal_static_ConfirmMessage_fieldAccessorTable + return akka.persistence.serialization.MessageFormats.internal_static_DeliveredMessage_fieldAccessorTable .ensureFieldAccessorsInitialized( - akka.persistence.serialization.MessageFormats.ConfirmMessage.class, akka.persistence.serialization.MessageFormats.ConfirmMessage.Builder.class); + akka.persistence.serialization.MessageFormats.DeliveredMessage.class, akka.persistence.serialization.MessageFormats.DeliveredMessage.Builder.class); } - // Construct using akka.persistence.serialization.MessageFormats.ConfirmMessage.newBuilder() + // Construct using akka.persistence.serialization.MessageFormats.DeliveredMessage.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -3569,13 +3488,13 @@ public final class MessageFormats { super.clear(); processorId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - messageSequenceNr_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); channelId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + persistentSequenceNr_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); - wrapperSequenceNr_ = 0L; + deliverySequenceNr_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); - channelEndpoint_ = ""; + channel_ = ""; bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -3586,23 +3505,23 @@ public final class MessageFormats { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return akka.persistence.serialization.MessageFormats.internal_static_ConfirmMessage_descriptor; + return akka.persistence.serialization.MessageFormats.internal_static_DeliveredMessage_descriptor; } - public akka.persistence.serialization.MessageFormats.ConfirmMessage getDefaultInstanceForType() { - return akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance(); + public akka.persistence.serialization.MessageFormats.DeliveredMessage getDefaultInstanceForType() { + return akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance(); } - public akka.persistence.serialization.MessageFormats.ConfirmMessage build() { - akka.persistence.serialization.MessageFormats.ConfirmMessage result = buildPartial(); + public akka.persistence.serialization.MessageFormats.DeliveredMessage build() { + akka.persistence.serialization.MessageFormats.DeliveredMessage result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public akka.persistence.serialization.MessageFormats.ConfirmMessage buildPartial() { - akka.persistence.serialization.MessageFormats.ConfirmMessage result = new akka.persistence.serialization.MessageFormats.ConfirmMessage(this); + public akka.persistence.serialization.MessageFormats.DeliveredMessage buildPartial() { + akka.persistence.serialization.MessageFormats.DeliveredMessage result = new akka.persistence.serialization.MessageFormats.DeliveredMessage(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -3612,54 +3531,54 @@ public final class MessageFormats { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.messageSequenceNr_ = messageSequenceNr_; + result.channelId_ = channelId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.channelId_ = channelId_; + result.persistentSequenceNr_ = persistentSequenceNr_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - 
result.wrapperSequenceNr_ = wrapperSequenceNr_; + result.deliverySequenceNr_ = deliverySequenceNr_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } - result.channelEndpoint_ = channelEndpoint_; + result.channel_ = channel_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof akka.persistence.serialization.MessageFormats.ConfirmMessage) { - return mergeFrom((akka.persistence.serialization.MessageFormats.ConfirmMessage)other); + if (other instanceof akka.persistence.serialization.MessageFormats.DeliveredMessage) { + return mergeFrom((akka.persistence.serialization.MessageFormats.DeliveredMessage)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(akka.persistence.serialization.MessageFormats.ConfirmMessage other) { - if (other == akka.persistence.serialization.MessageFormats.ConfirmMessage.getDefaultInstance()) return this; + public Builder mergeFrom(akka.persistence.serialization.MessageFormats.DeliveredMessage other) { + if (other == akka.persistence.serialization.MessageFormats.DeliveredMessage.getDefaultInstance()) return this; if (other.hasProcessorId()) { bitField0_ |= 0x00000001; processorId_ = other.processorId_; onChanged(); } - if (other.hasMessageSequenceNr()) { - setMessageSequenceNr(other.getMessageSequenceNr()); - } if (other.hasChannelId()) { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; channelId_ = other.channelId_; onChanged(); } - if (other.hasWrapperSequenceNr()) { - setWrapperSequenceNr(other.getWrapperSequenceNr()); + if (other.hasPersistentSequenceNr()) { + setPersistentSequenceNr(other.getPersistentSequenceNr()); } - if (other.hasChannelEndpoint()) { + if (other.hasDeliverySequenceNr()) { + setDeliverySequenceNr(other.getDeliverySequenceNr()); + } + if (other.hasChannel()) { bitField0_ |= 0x00000010; - channelEndpoint_ = other.channelEndpoint_; + channel_ = other.channel_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); @@ -3674,11 +3593,11 @@ public final class MessageFormats { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - akka.persistence.serialization.MessageFormats.ConfirmMessage parsedMessage = null; + akka.persistence.serialization.MessageFormats.DeliveredMessage parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (akka.persistence.serialization.MessageFormats.ConfirmMessage) e.getUnfinishedMessage(); + parsedMessage = (akka.persistence.serialization.MessageFormats.DeliveredMessage) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -3763,49 +3682,16 @@ public final class MessageFormats { return this; } - // optional int64 messageSequenceNr = 2; - private long messageSequenceNr_ ; + // optional string channelId = 2; + private java.lang.Object channelId_ = ""; /** - * optional int64 messageSequenceNr = 2; + * optional string channelId = 2; */ - public boolean hasMessageSequenceNr() { + public boolean hasChannelId() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional int64 messageSequenceNr = 2; - */ - public long getMessageSequenceNr() { - return messageSequenceNr_; - } - /** - * optional int64 messageSequenceNr = 2; - */ - public Builder setMessageSequenceNr(long value) { - bitField0_ |= 0x00000002; - 
messageSequenceNr_ = value; - onChanged(); - return this; - } - /** - * optional int64 messageSequenceNr = 2; - */ - public Builder clearMessageSequenceNr() { - bitField0_ = (bitField0_ & ~0x00000002); - messageSequenceNr_ = 0L; - onChanged(); - return this; - } - - // optional string channelId = 3; - private java.lang.Object channelId_ = ""; - /** - * optional string channelId = 3; - */ - public boolean hasChannelId() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string channelId = 3; + * optional string channelId = 2; */ public java.lang.String getChannelId() { java.lang.Object ref = channelId_; @@ -3819,7 +3705,7 @@ public final class MessageFormats { } } /** - * optional string channelId = 3; + * optional string channelId = 2; */ public com.google.protobuf.ByteString getChannelIdBytes() { @@ -3835,157 +3721,190 @@ public final class MessageFormats { } } /** - * optional string channelId = 3; + * optional string channelId = 2; */ public Builder setChannelId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; channelId_ = value; onChanged(); return this; } /** - * optional string channelId = 3; + * optional string channelId = 2; */ public Builder clearChannelId() { - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000002); channelId_ = getDefaultInstance().getChannelId(); onChanged(); return this; } /** - * optional string channelId = 3; + * optional string channelId = 2; */ public Builder setChannelIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; channelId_ = value; onChanged(); return this; } - // optional int64 wrapperSequenceNr = 4; - private long wrapperSequenceNr_ ; + // optional int64 persistentSequenceNr = 3; + private long persistentSequenceNr_ ; /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - public boolean hasWrapperSequenceNr() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public boolean hasPersistentSequenceNr() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - public long getWrapperSequenceNr() { - return wrapperSequenceNr_; + public long getPersistentSequenceNr() { + return persistentSequenceNr_; } /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - public Builder setWrapperSequenceNr(long value) { - bitField0_ |= 0x00000008; - wrapperSequenceNr_ = value; + public Builder setPersistentSequenceNr(long value) { + bitField0_ |= 0x00000004; + persistentSequenceNr_ = value; onChanged(); return this; } /** - * optional int64 wrapperSequenceNr = 4; + * optional int64 persistentSequenceNr = 3; */ - public Builder clearWrapperSequenceNr() { - bitField0_ = (bitField0_ & ~0x00000008); - wrapperSequenceNr_ = 0L; + public Builder clearPersistentSequenceNr() { + bitField0_ = (bitField0_ & ~0x00000004); + persistentSequenceNr_ = 0L; onChanged(); return this; } - // optional string channelEndpoint = 5; - private java.lang.Object channelEndpoint_ = ""; + // optional int64 deliverySequenceNr = 4; + private long deliverySequenceNr_ ; /** - * optional string channelEndpoint = 5; + * optional int64 deliverySequenceNr = 4; */ - public boolean hasChannelEndpoint() { + public boolean hasDeliverySequenceNr() { + return ((bitField0_ & 
0x00000008) == 0x00000008); + } + /** + * optional int64 deliverySequenceNr = 4; + */ + public long getDeliverySequenceNr() { + return deliverySequenceNr_; + } + /** + * optional int64 deliverySequenceNr = 4; + */ + public Builder setDeliverySequenceNr(long value) { + bitField0_ |= 0x00000008; + deliverySequenceNr_ = value; + onChanged(); + return this; + } + /** + * optional int64 deliverySequenceNr = 4; + */ + public Builder clearDeliverySequenceNr() { + bitField0_ = (bitField0_ & ~0x00000008); + deliverySequenceNr_ = 0L; + onChanged(); + return this; + } + + // optional string channel = 5; + private java.lang.Object channel_ = ""; + /** + * optional string channel = 5; + */ + public boolean hasChannel() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ - public java.lang.String getChannelEndpoint() { - java.lang.Object ref = channelEndpoint_; + public java.lang.String getChannel() { + java.lang.Object ref = channel_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - channelEndpoint_ = s; + channel_ = s; return s; } else { return (java.lang.String) ref; } } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ public com.google.protobuf.ByteString - getChannelEndpointBytes() { - java.lang.Object ref = channelEndpoint_; + getChannelBytes() { + java.lang.Object ref = channel_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - channelEndpoint_ = b; + channel_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ - public Builder setChannelEndpoint( + public Builder setChannel( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; - channelEndpoint_ = value; + channel_ = value; onChanged(); return this; } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ - public Builder clearChannelEndpoint() { + public Builder clearChannel() { bitField0_ = (bitField0_ & ~0x00000010); - channelEndpoint_ = getDefaultInstance().getChannelEndpoint(); + channel_ = getDefaultInstance().getChannel(); onChanged(); return this; } /** - * optional string channelEndpoint = 5; + * optional string channel = 5; */ - public Builder setChannelEndpointBytes( + public Builder setChannelBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; - channelEndpoint_ = value; + channel_ = value; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:ConfirmMessage) + // @@protoc_insertion_point(builder_scope:DeliveredMessage) } static { - defaultInstance = new ConfirmMessage(true); + defaultInstance = new DeliveredMessage(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:ConfirmMessage) + // @@protoc_insertion_point(class_scope:DeliveredMessage) } public interface DeliverMessageOrBuilder @@ -4019,16 +3938,6 @@ public final class MessageFormats { */ com.google.protobuf.ByteString getDestinationBytes(); - - // optional .DeliverMessage.ResolveStrategy resolve = 3; - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - boolean hasResolve(); - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - 
akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy getResolve(); } /** * Protobuf type {@code DeliverMessage} @@ -4099,17 +4008,6 @@ public final class MessageFormats { destination_ = input.readBytes(); break; } - case 24: { - int rawValue = input.readEnum(); - akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy value = akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(3, rawValue); - } else { - bitField0_ |= 0x00000004; - resolve_ = value; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4149,97 +4047,6 @@ public final class MessageFormats { return PARSER; } - /** - * Protobuf enum {@code DeliverMessage.ResolveStrategy} - */ - public enum ResolveStrategy - implements com.google.protobuf.ProtocolMessageEnum { - /** - * Off = 1; - */ - Off(0, 1), - /** - * Sender = 2; - */ - Sender(1, 2), - /** - * Destination = 3; - */ - Destination(2, 3), - ; - - /** - * Off = 1; - */ - public static final int Off_VALUE = 1; - /** - * Sender = 2; - */ - public static final int Sender_VALUE = 2; - /** - * Destination = 3; - */ - public static final int Destination_VALUE = 3; - - - public final int getNumber() { return value; } - - public static ResolveStrategy valueOf(int value) { - switch (value) { - case 1: return Off; - case 2: return Sender; - case 3: return Destination; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public ResolveStrategy findValueByNumber(int number) { - return ResolveStrategy.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return akka.persistence.serialization.MessageFormats.DeliverMessage.getDescriptor().getEnumTypes().get(0); - } - - private static final ResolveStrategy[] VALUES = values(); - - public static ResolveStrategy valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private ResolveStrategy(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:DeliverMessage.ResolveStrategy) - } - private int bitField0_; // optional .PersistentMessage persistent = 1; public static final int PERSISTENT_FIELD_NUMBER = 1; @@ -4306,26 +4113,9 @@ public final class MessageFormats { } } - // optional .DeliverMessage.ResolveStrategy resolve = 3; - public static final int RESOLVE_FIELD_NUMBER = 3; - private akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy resolve_; - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - public boolean hasResolve() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - public 
akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy getResolve() { - return resolve_; - } - private void initFields() { persistent_ = akka.persistence.serialization.MessageFormats.PersistentMessage.getDefaultInstance(); destination_ = ""; - resolve_ = akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy.Off; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4351,9 +4141,6 @@ public final class MessageFormats { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getDestinationBytes()); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeEnum(3, resolve_.getNumber()); - } getUnknownFields().writeTo(output); } @@ -4371,10 +4158,6 @@ public final class MessageFormats { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getDestinationBytes()); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, resolve_.getNumber()); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4500,8 +4283,6 @@ public final class MessageFormats { bitField0_ = (bitField0_ & ~0x00000001); destination_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - resolve_ = akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy.Off; - bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -4542,10 +4323,6 @@ public final class MessageFormats { to_bitField0_ |= 0x00000002; } result.destination_ = destination_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.resolve_ = resolve_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4570,9 +4347,6 @@ public final class MessageFormats { destination_ = other.destination_; onChanged(); } - if (other.hasResolve()) { - setResolve(other.getResolve()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4797,42 +4571,6 @@ public final class MessageFormats { return this; } - // optional .DeliverMessage.ResolveStrategy resolve = 3; - private akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy resolve_ = akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy.Off; - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - public boolean hasResolve() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - public akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy getResolve() { - return resolve_; - } - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - public Builder setResolve(akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - resolve_ = value; - onChanged(); - return this; - } - /** - * optional .DeliverMessage.ResolveStrategy resolve = 3; - */ - public Builder clearResolve() { - bitField0_ = (bitField0_ & ~0x00000004); - resolve_ = akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy.Off; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:DeliverMessage) } @@ -4860,10 +4598,10 @@ public final class MessageFormats { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_PersistentPayload_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - 
internal_static_ConfirmMessage_descriptor; + internal_static_DeliveredMessage_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ConfirmMessage_fieldAccessorTable; + internal_static_DeliveredMessage_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_DeliverMessage_descriptor; private static @@ -4880,25 +4618,21 @@ public final class MessageFormats { java.lang.String[] descriptorData = { "\n\024MessageFormats.proto\";\n\026PersistentMess" + "ageBatch\022!\n\005batch\030\001 \003(\0132\022.PersistentMess" + - "age\"\221\002\n\021PersistentMessage\022#\n\007payload\030\001 \001" + + "age\"\201\002\n\021PersistentMessage\022#\n\007payload\030\001 \001" + "(\0132\022.PersistentPayload\022\022\n\nsequenceNr\030\002 \001" + "(\003\022\023\n\013processorId\030\003 \001(\t\022\017\n\007deleted\030\004 \001(\010" + - "\022\020\n\010resolved\030\005 \001(\010\022\024\n\014redeliveries\030\006 \001(\005" + - "\022\020\n\010confirms\030\007 \003(\t\022\023\n\013confirmable\030\010 \001(\010\022" + - "\'\n\016confirmMessage\030\t \001(\0132\017.ConfirmMessage" + - "\022\025\n\rconfirmTarget\030\n \001(\t\022\016\n\006sender\030\013 \001(\t\"" + - "S\n\021PersistentPayload\022\024\n\014serializerId\030\001 \002", - "(\005\022\017\n\007payload\030\002 \002(\014\022\027\n\017payloadManifest\030\003" + - " \001(\014\"\207\001\n\016ConfirmMessage\022\023\n\013processorId\030\001" + - " \001(\t\022\031\n\021messageSequenceNr\030\002 \001(\003\022\021\n\tchann" + - "elId\030\003 \001(\t\022\031\n\021wrapperSequenceNr\030\004 \001(\003\022\027\n" + - "\017channelEndpoint\030\005 \001(\t\"\270\001\n\016DeliverMessag" + - "e\022&\n\npersistent\030\001 \001(\0132\022.PersistentMessag" + - "e\022\023\n\013destination\030\002 \001(\t\0220\n\007resolve\030\003 \001(\0162" + - "\037.DeliverMessage.ResolveStrategy\"7\n\017Reso" + - "lveStrategy\022\007\n\003Off\020\001\022\n\n\006Sender\020\002\022\017\n\013Dest" + - "ination\020\003B\"\n\036akka.persistence.serializat", - "ionH\001" + "\022\024\n\014redeliveries\030\006 \001(\005\022\020\n\010confirms\030\007 \003(\t" + + "\022\023\n\013confirmable\030\010 \001(\010\022)\n\016confirmMessage\030" + + "\t \001(\0132\021.DeliveredMessage\022\025\n\rconfirmTarge" + + "t\030\n \001(\t\022\016\n\006sender\030\013 \001(\t\"S\n\021PersistentPay" + + "load\022\024\n\014serializerId\030\001 \002(\005\022\017\n\007payload\030\002 ", + "\002(\014\022\027\n\017payloadManifest\030\003 \001(\014\"\205\001\n\020Deliver" + + "edMessage\022\023\n\013processorId\030\001 \001(\t\022\021\n\tchanne" + + "lId\030\002 \001(\t\022\034\n\024persistentSequenceNr\030\003 \001(\003\022" + + "\032\n\022deliverySequenceNr\030\004 \001(\003\022\017\n\007channel\030\005" + + " \001(\t\"M\n\016DeliverMessage\022&\n\npersistent\030\001 \001" + + "(\0132\022.PersistentMessage\022\023\n\013destination\030\002 " + + "\001(\tB\"\n\036akka.persistence.serializationH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -4916,25 +4650,25 @@ public final class MessageFormats { internal_static_PersistentMessage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PersistentMessage_descriptor, - new java.lang.String[] { "Payload", "SequenceNr", "ProcessorId", "Deleted", "Resolved", "Redeliveries", "Confirms", "Confirmable", "ConfirmMessage", 
"ConfirmTarget", "Sender", }); + new java.lang.String[] { "Payload", "SequenceNr", "ProcessorId", "Deleted", "Redeliveries", "Confirms", "Confirmable", "ConfirmMessage", "ConfirmTarget", "Sender", }); internal_static_PersistentPayload_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_PersistentPayload_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_PersistentPayload_descriptor, new java.lang.String[] { "SerializerId", "Payload", "PayloadManifest", }); - internal_static_ConfirmMessage_descriptor = + internal_static_DeliveredMessage_descriptor = getDescriptor().getMessageTypes().get(3); - internal_static_ConfirmMessage_fieldAccessorTable = new + internal_static_DeliveredMessage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ConfirmMessage_descriptor, - new java.lang.String[] { "ProcessorId", "MessageSequenceNr", "ChannelId", "WrapperSequenceNr", "ChannelEndpoint", }); + internal_static_DeliveredMessage_descriptor, + new java.lang.String[] { "ProcessorId", "ChannelId", "PersistentSequenceNr", "DeliverySequenceNr", "Channel", }); internal_static_DeliverMessage_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_DeliverMessage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeliverMessage_descriptor, - new java.lang.String[] { "Persistent", "Destination", "Resolve", }); + new java.lang.String[] { "Persistent", "Destination", }); return null; } }; diff --git a/akka-persistence/src/main/protobuf/MessageFormats.proto b/akka-persistence/src/main/protobuf/MessageFormats.proto index 668a92796a..13351804dd 100644 --- a/akka-persistence/src/main/protobuf/MessageFormats.proto +++ b/akka-persistence/src/main/protobuf/MessageFormats.proto @@ -14,11 +14,10 @@ message PersistentMessage { optional int64 sequenceNr = 2; optional string processorId = 3; optional bool deleted = 4; - optional bool resolved = 5; optional int32 redeliveries = 6; repeated string confirms = 7; optional bool confirmable = 8; - optional ConfirmMessage confirmMessage = 9; + optional DeliveredMessage confirmMessage = 9; optional string confirmTarget = 10; optional string sender = 11; } @@ -29,22 +28,15 @@ message PersistentPayload { optional bytes payloadManifest = 3; } -message ConfirmMessage { +message DeliveredMessage { optional string processorId = 1; - optional int64 messageSequenceNr = 2; - optional string channelId = 3; - optional int64 wrapperSequenceNr = 4; - optional string channelEndpoint = 5; + optional string channelId = 2; + optional int64 persistentSequenceNr = 3; + optional int64 deliverySequenceNr = 4; + optional string channel = 5; } message DeliverMessage { - enum ResolveStrategy { - Off = 1; - Sender = 2; - Destination = 3; - } - optional PersistentMessage persistent = 1; optional string destination = 2; - optional ResolveStrategy resolve = 3; } \ No newline at end of file diff --git a/akka-persistence/src/main/resources/reference.conf b/akka-persistence/src/main/resources/reference.conf index 90dabbe45a..6f07b4386f 100644 --- a/akka-persistence/src/main/resources/reference.conf +++ b/akka-persistence/src/main/resources/reference.conf @@ -30,7 +30,13 @@ akka { # Only applies to internally created batches by processors that receive # persistent messages individually. Application-defined batches, even if # larger than this setting, are always written as a single isolated batch. 
-      max-batch-size = 200
+      max-message-batch-size = 200
+
+      # Maximum size of a confirmation batch written to the journal.
+      max-confirmation-batch-size = 10000
+
+      # Maximum size of a deletion batch written to the journal.
+      max-deletion-batch-size = 10000
 
       # Path to the journal plugin to be used
       plugin = "akka.persistence.journal.leveldb"
@@ -61,7 +67,7 @@ akka {
       dir = "journal"
 
       # Use fsync on write
-      fsync = off
+      fsync = on
 
       # Verify checksum on read.
       checksum = off
@@ -91,7 +97,7 @@ akka {
       dir = "journal"
 
       # Use fsync on write
-      fsync = off
+      fsync = on
 
       # Verify checksum on read.
       checksum = off
@@ -124,6 +130,19 @@ akka {
       }
     }
 
+    view {
+
+      # Automated incremental view update.
+      auto-update = on
+
+      # Interval between incremental updates.
+      auto-update-interval = 5s
+
+      # Maximum number of messages to replay per incremental view update. Set to
+      # -1 for no upper limit.
+      auto-update-replay-max = -1
+    }
+
     dispatchers {
       default-plugin-dispatcher {
         type = PinnedDispatcher
diff --git a/akka-persistence/src/main/scala/akka/persistence/Channel.scala b/akka-persistence/src/main/scala/akka/persistence/Channel.scala
index c4546f0532..061819819c 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Channel.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Channel.scala
@@ -4,64 +4,67 @@
 
 package akka.persistence
 
+import java.lang.{ Iterable ⇒ JIterable }
+
 import scala.collection.immutable
+import scala.collection.JavaConverters._
 import scala.concurrent.duration._
 import scala.language.postfixOps
 
 import akka.actor._
-import akka.dispatch.Envelope
-import akka.persistence.JournalProtocol.Confirm
 import akka.persistence.serialization.Message
+import akka.persistence.JournalProtocol._
 
 /**
  * A [[Channel]] configuration object.
  *
- * @param redeliverMax maximum number of redeliveries (default is 5).
- * @param redeliverInterval interval between redeliveries (default is 5 seconds).
+ * @param redeliverMax Maximum number of redelivery attempts.
+ * @param redeliverInterval Interval between redelivery attempts.
+ * @param redeliverFailureListener Receiver of [[RedeliverFailure]] notifications which are sent when the number
+ *                                 of redeliveries reaches `redeliverMax` for a sequence of messages. To enforce
+ *                                 a redelivery of these messages, the listener has to restart the sending processor.
+ *                                 Alternatively, it can also confirm these messages, preventing further redeliveries.
  */
 @SerialVersionUID(1L)
-class ChannelSettings(
-  val redeliverMax: Int,
-  val redeliverInterval: FiniteDuration) extends Serializable {
+case class ChannelSettings(
+  val redeliverMax: Int = 5,
+  val redeliverInterval: FiniteDuration = 5.seconds,
+  val redeliverFailureListener: Option[ActorRef] = None) {
 
   /**
    * Java API.
    */
   def withRedeliverMax(redeliverMax: Int): ChannelSettings =
-    update(redeliverMax = redeliverMax)
+    copy(redeliverMax = redeliverMax)
 
   /**
    * Java API.
    */
   def withRedeliverInterval(redeliverInterval: FiniteDuration): ChannelSettings =
-    update(redeliverInterval = redeliverInterval)
-
-  private def update(
-    redeliverMax: Int = redeliverMax,
-    redeliverInterval: FiniteDuration = redeliverInterval): ChannelSettings =
-    new ChannelSettings(redeliverMax, redeliverInterval)
-}
-
-object ChannelSettings {
-  def apply(
-    redeliverMax: Int = 5,
-    redeliverInterval: FiniteDuration = 5 seconds): ChannelSettings =
-    new ChannelSettings(redeliverMax, redeliverInterval)
+    copy(redeliverInterval = redeliverInterval)
 
   /**
    * Java API.
    */
-  def create() = apply()
+  def withRedeliverFailureListener(redeliverFailureListener: ActorRef): ChannelSettings =
+    copy(redeliverFailureListener = Option(redeliverFailureListener))
+}
+
+object ChannelSettings {
+  /**
+   * Java API.
+   */
+  def create() = ChannelSettings.apply()
 }
 
 /**
- * A channel is used by [[Processor]]s for sending [[Persistent]] messages to destinations. The main
- * responsibility of a channel is to prevent redundant delivery of replayed messages to destinations
+ * A channel is used by [[Processor]]s (and [[View]]s) for sending [[Persistent]] messages to destinations.
+ * The main responsibility of a channel is to prevent redundant delivery of replayed messages to destinations
  * when a processor is recovered.
  *
- * A channel is instructed to deliver a persistent message to a `destination` with the [[Deliver]]
- * command.
+ * A channel is instructed to deliver a persistent message to a destination with the [[Deliver]] command. A
+ * destination is provided as `ActorPath` and messages are sent via that path's `ActorSelection`.
  *
  * {{{
  * class ForwardExample extends Processor {
@@ -71,7 +74,7 @@ object ChannelSettings {
  *   def receive = {
  *     case m @ Persistent(payload, _) =>
  *       // forward modified message to destination
- *       channel forward Deliver(m.withPayload(s"fw: ${payload}"), destination)
+ *       channel forward Deliver(m.withPayload(s"fw: ${payload}"), destination.path)
  *   }
  * }
  * }}}
@@ -86,7 +89,7 @@ object ChannelSettings {
  *   def receive = {
  *     case m @ Persistent(payload, _) =>
  *       // reply modified message to sender
- *       channel ! Deliver(m.withPayload(s"re: ${payload}"), sender)
+ *       channel ! Deliver(m.withPayload(s"re: ${payload}"), sender.path)
  *   }
  * }
  * }}}
@@ -105,39 +108,38 @@ object ChannelSettings {
  * }}}
  *
  * If a destination does not confirm the receipt of a `ConfirmablePersistent` message, it will be redelivered
- * by the channel according to the parameters in [[ChannelSettings]]. Message redelivery is done out of order
- * with regards to normal delivery i.e. redelivered messages may arrive later than newer normally delivered
- * messages. Redelivered messages have a `redeliveries` value greater than zero.
+ * by the channel according to the parameters in [[ChannelSettings]]. Redelivered messages have a `redeliveries`
+ * value greater than zero.
 *
- * If the maximum number of redeliveries for a certain message is reached and there is still no confirmation
- * from the destination, then this message is removed from the channel. In order to deliver that message to
- * the destination again, the processor must replay its stored messages to the channel (during start or restart).
- * Replayed, unconfirmed messages are then processed and delivered by the channel again. These messages are now
- * duplicates (with a `redeliveries` counter starting from zero). Duplicates can be detected by destinations
- * by tracking message sequence numbers.
+ * If the maximum number of redeliveries is reached for certain messages, they are removed from the channel and
+ * a `redeliverFailureListener` (if specified, see [[ChannelSettings]]) is notified about these messages with a
+ * [[RedeliverFailure]] message. Besides other application-specific tasks, this listener can restart the sending
+ * processor to enforce a redelivery of these messages or confirm these messages to prevent further redeliveries.
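
A minimal Scala sketch of such a listener (actor names are illustrative, not part of this patch; it assumes the `ChannelSettings` and `Channel.props` API introduced above):

    import akka.actor._
    import akka.persistence._

    class MyRedeliverFailureListener extends Actor {
      def receive = {
        case RedeliverFailure(messages) ⇒
          // Give up on these messages and stop further redelivery attempts by
          // confirming them; alternatively, restart the sending processor to
          // enforce another round of deliveries.
          messages.foreach(_.confirm())
      }
    }

    // wiring, e.g. inside the sending processor:
    // val listener = context.actorOf(Props[MyRedeliverFailureListener])
    // val channel = context.actorOf(Channel.props(
    //   ChannelSettings(redeliverFailureListener = Some(listener))))
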
 *
 * @see [[Deliver]]
 */
final class Channel private[akka] (_channelId: Option[String], channelSettings: ChannelSettings) extends Actor {
+  import channelSettings._
+
   private val id = _channelId match {
     case Some(cid) ⇒ cid
     case None     ⇒ Persistence(context.system).channelId(self)
   }
 
-  private val journal = Persistence(context.system).journalFor(id)
-
-  private val reliableDelivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings))
-  private val resolvedDelivery = context.actorOf(Props(classOf[ResolvedDelivery], reliableDelivery))
+  private val journal = Persistence(context.system).confirmationBatchingJournalForChannel(id)
+  private val delivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings))
 
   def receive = {
-    case d @ Deliver(persistent: PersistentRepr, _, _) ⇒
-      if (!persistent.confirms.contains(id)) resolvedDelivery forward d.copy(prepareDelivery(persistent))
+    case d @ Deliver(persistent: PersistentRepr, _) ⇒
+      if (!persistent.confirms.contains(id)) delivery forward d.copy(prepareDelivery(persistent))
+    case d: RedeliverFailure ⇒ redeliverFailureListener.foreach(_ ! d)
+    case d: Delivered ⇒ delivery forward d
   }
 
   private def prepareDelivery(persistent: PersistentRepr): PersistentRepr =
     ConfirmablePersistentImpl(persistent,
       confirmTarget = journal,
-      confirmMessage = Confirm(persistent.processorId, persistent.sequenceNr, id))
+      confirmMessage = DeliveredByChannel(persistent.processorId, id, persistent.sequenceNr, channel = self))
 }
 
 object Channel {
@@ -178,189 +180,144 @@ object Channel {
 }
 
 /**
- * Instructs a [[Channel]] or [[PersistentChannel]] to deliver `persistent` message to
- * destination `destination`. The `resolve` parameter can be:
- *
- * - `Resolve.Destination`: will resolve a new destination reference from the specified
- *   `destination`s path. The `persistent` message will be sent to the newly resolved
- *   destination.
- * - `Resolve.Sender`: will resolve a new sender reference from this `Deliver` message's
- *   `sender` path. The `persistent` message will be sent to the specified `destination`
- *   using the newly resolved sender.
- * - `Resolve.Off`: will not do any resolution (default).
- *
- * Resolving an actor reference means first obtaining an `ActorSelection` from the path of
- * the reference to be resolved and then obtaining a new actor reference via an `Identify`
- * - `ActorIdentity` conversation. Actor reference resolution does not change the original
- * order of messages.
- *
- * Resolving actor references may become necessary when using the stored sender references
- * of replayed messages. A stored sender reference may become invalid (for example, it may
- * reference a previous sender incarnation, after a JVM restart). Depending on how a processor
- * uses sender references, two resolution strategies are relevant.
- *
- * - `Resolve.Sender` when a processor forwards a replayed message to a destination.
- *
- * {{{
- *   channel forward Deliver(message, destination, Resolve.Sender)
- * }}}
- *
- * - `Resolve.Destination` when a processor replies to the sender of a replayed message. In
- *   this case the sender is used as channel destination.
- *
- * {{{
Deliver(message, sender, Resolve.Destination) - * }}} - * - * A destination or sender reference will only be resolved by a channel if - * - * - the `resolve` parameter is set to `Resolve.Destination` or `Resolve.Channel` - * - the message is replayed - * - the message is not retained by the channel and - * - there was no previous successful resolve action for that message + * Instructs a [[Channel]] or [[PersistentChannel]] to deliver a `persistent` message to + * a `destination`. * * @param persistent persistent message. * @param destination persistent message destination. - * @param resolve resolve strategy. */ @SerialVersionUID(1L) -case class Deliver(persistent: Persistent, destination: ActorRef, resolve: Resolve.ResolveStrategy = Resolve.Off) extends Message +case class Deliver(persistent: Persistent, destination: ActorPath) extends Message object Deliver { /** * Java API. */ - def create(persistent: Persistent, destination: ActorRef) = Deliver(persistent, destination) - - /** - * Java API. - */ - def create(persistent: Persistent, destination: ActorRef, resolve: Resolve.ResolveStrategy) = Deliver(persistent, destination, resolve) + def create(persistent: Persistent, destination: ActorPath) = Deliver(persistent, destination) } /** - * Actor reference resolution strategy. - * - * @see [[Deliver]] + * Plugin API: confirmation message generated by receivers of [[ConfirmablePersistent]] messages + * by calling `ConfirmablePersistent.confirm()`. */ -object Resolve { - sealed abstract class ResolveStrategy +trait Delivered extends Message { + def channelId: String + def persistentSequenceNr: Long + def deliverySequenceNr: Long + def channel: ActorRef /** - * No resolution. + * INTERNAL API. */ - @SerialVersionUID(1L) - case object Off extends ResolveStrategy - - /** - * [[Channel]] should resolve the `sender` of a [[Deliver]] message. - */ - @SerialVersionUID(1L) - case object Sender extends ResolveStrategy - - /** - * [[Channel]] should resolve the `destination` of a [[Deliver]] message. - */ - @SerialVersionUID(1L) - case object Destination extends ResolveStrategy - - /** - * Java API. - */ - def off() = Off - - /** - * Java API. - */ - def sender() = Sender - - /** - * Java API. - */ - def destination() = Destination + private[persistence] def update(deliverySequenceNr: Long = deliverySequenceNr, channel: ActorRef = channel): Delivered } /** - * Resolves actor references as specified by [[Deliver]] requests and then delegates delivery - * to `next`. + * Plugin API. */ -private class ResolvedDelivery(next: ActorRef) extends Actor with Stash { - private var currentResolution: Envelope = _ +case class DeliveredByChannel( + processorId: String, + channelId: String, + persistentSequenceNr: Long, + deliverySequenceNr: Long = 0L, + channel: ActorRef = null) extends Delivered with PersistentConfirmation { - private val delivering: Receive = { - case d @ Deliver(persistent: PersistentRepr, destination, resolve) ⇒ - resolve match { - case Resolve.Sender if !persistent.resolved ⇒ - context.actorSelection(sender.path) ! Identify(1) - context.become(resolving, discardOld = false) - currentResolution = Envelope(d, sender, context.system) - case Resolve.Destination if !persistent.resolved ⇒ - context.actorSelection(destination.path) ! 
Identify(1) - context.become(resolving, discardOld = false) - currentResolution = Envelope(d, sender, context.system) - case _ ⇒ next forward d + def sequenceNr: Long = persistentSequenceNr + def update(deliverySequenceNr: Long, channel: ActorRef): DeliveredByChannel = + copy(deliverySequenceNr = deliverySequenceNr, channel = channel) +} + +/** + * INTERNAL API. + */ +private[persistence] class DeliveredByChannelBatching(journal: ActorRef, settings: PersistenceSettings) extends Actor { + private val publish = settings.internal.publishConfirmations + private val batchMax = settings.journal.maxConfirmationBatchSize + + private var batching = false + private var batch = Vector.empty[DeliveredByChannel] + + def receive = { + case WriteConfirmationsSuccess(confirmations) ⇒ + if (batch.isEmpty) batching = false else journalBatch() + confirmations.foreach { c ⇒ + val dbc = c.asInstanceOf[DeliveredByChannel] + if (dbc.channel != null) dbc.channel ! c + if (publish) context.system.eventStream.publish(c) } - unstash() + case WriteConfirmationsFailure(_) ⇒ + if (batch.isEmpty) batching = false else journalBatch() + case d: DeliveredByChannel ⇒ + addToBatch(d) + if (!batching || maxBatchSizeReached) journalBatch() + case m ⇒ journal forward m } - private val resolving: Receive = { - case ActorIdentity(1, resolvedOption) ⇒ - val Envelope(d: Deliver, sender) = currentResolution - if (d.resolve == Resolve.Sender) { - next tell (d, resolvedOption.getOrElse(sender)) - } else if (d.resolve == Resolve.Destination) { - next tell (d.copy(destination = resolvedOption.getOrElse(d.destination)), sender) - } - context.unbecome() - unstash() - case _: Deliver ⇒ stash() - } + def addToBatch(pc: DeliveredByChannel): Unit = + batch = batch :+ pc - def receive = delivering + def maxBatchSizeReached: Boolean = + batch.length >= batchMax + + def journalBatch(): Unit = { + journal ! WriteConfirmations(batch, self) + batch = Vector.empty + batching = true + } +} + +/** + * Notification message to inform channel listeners about messages that have reached the maximum + * number of redeliveries. + */ +case class RedeliverFailure(messages: immutable.Seq[ConfirmablePersistent]) { + /** + * Java API. + */ + def getMessages: JIterable[ConfirmablePersistent] = messages.asJava } /** * Reliably deliver messages contained in [[Deliver]] requests to their destinations. Unconfirmed * messages are redelivered according to the parameters in [[ChannelSettings]]. 
*/ -private class ReliableDelivery(channelSettings: ChannelSettings) extends Actor { - import channelSettings._ +private class ReliableDelivery(redeliverSettings: ChannelSettings) extends Actor { + import redeliverSettings._ import ReliableDelivery._ - private val redelivery = context.actorOf(Props(classOf[Redelivery], channelSettings)) - private var attempts: DeliveryAttempts = Map.empty - private var sequenceNr: Long = 0L + private val redelivery = context.actorOf(Props(classOf[Redelivery], redeliverSettings)) + private var deliveryAttempts: DeliveryAttempts = immutable.SortedMap.empty + private var deliverySequenceNr: Long = 0L def receive = { - case d @ Deliver(persistent: PersistentRepr, destination, _) ⇒ - val dsnr = nextSequenceNr() + case d @ Deliver(persistent: ConfirmablePersistentImpl, destination) ⇒ + val dsnr = nextDeliverySequenceNr() val psnr = persistent.sequenceNr - val confirm = persistent.confirmMessage.copy(channelEndpoint = self) + val confirm = persistent.confirmMessage.update(deliverySequenceNr = dsnr) val updated = persistent.update(confirmMessage = confirm, sequenceNr = if (psnr == 0) dsnr else psnr) - destination forward updated - attempts += ((updated.processorId, updated.sequenceNr) -> DeliveryAttempt(updated, destination, sender, dsnr)) - case c @ Confirm(processorId, messageSequenceNr, _, _, _) ⇒ - attempts -= ((processorId, messageSequenceNr)) + context.actorSelection(destination).tell(updated, sender) + deliveryAttempts += (dsnr -> DeliveryAttempt(updated, destination, sender)) + case d: Delivered ⇒ + deliveryAttempts -= d.deliverySequenceNr + redelivery forward d case Redeliver ⇒ val limit = System.nanoTime - redeliverInterval.toNanos - val (older, younger) = attempts.partition { case (_, a) ⇒ a.timestamp < limit } + val (older, younger) = deliveryAttempts.span { case (_, a) ⇒ a.timestamp < limit } redelivery ! Redeliver(older, redeliverMax) - attempts = younger + deliveryAttempts = younger } - private def nextSequenceNr(): Long = { - sequenceNr += 1 - sequenceNr + private def nextDeliverySequenceNr(): Long = { + deliverySequenceNr += 1 + deliverySequenceNr } } private object ReliableDelivery { - type DeliveryAttempts = immutable.Map[(String, Long), DeliveryAttempt] - - case class DeliveryAttempt(persistent: PersistentRepr, destination: ActorRef, sender: ActorRef, deliverySequenceNr: Long, timestamp: Long = System.nanoTime) { - def withChannelEndpoint(channelEndpoint: ActorRef) = - copy(persistent.update(confirmMessage = persistent.confirmMessage.copy(channelEndpoint = channelEndpoint))) + type DeliveryAttempts = immutable.SortedMap[Long, DeliveryAttempt] + type FailedAttempts = Vector[ConfirmablePersistentImpl] + case class DeliveryAttempt(persistent: ConfirmablePersistentImpl, destination: ActorPath, sender: ActorRef, timestamp: Long = System.nanoTime) { def incrementRedeliveryCount = copy(persistent.update(redeliveries = persistent.redeliveries + 1)) } @@ -371,40 +328,42 @@ private object ReliableDelivery { /** * Redelivery process used by [[ReliableDelivery]]. 
*/ -private class Redelivery(channelSettings: ChannelSettings) extends Actor { +private class Redelivery(redeliverSettings: ChannelSettings) extends Actor { import context.dispatcher - import channelSettings._ + import redeliverSettings._ import ReliableDelivery._ - private var attempts: DeliveryAttempts = Map.empty - private var schedule: Cancellable = _ + private var redeliveryAttempts: DeliveryAttempts = immutable.SortedMap.empty + private var redeliverySchedule: Cancellable = _ def receive = { case Redeliver(as, max) ⇒ - attempts ++= as.map { case (k, a) ⇒ (k, a.withChannelEndpoint(self)) } - attempts = attempts.foldLeft[DeliveryAttempts](Map.empty) { - case (acc, (k, attempt)) ⇒ - // drop redelivery attempts that exceed redeliveryMax - if (attempt.persistent.redeliveries >= redeliverMax) acc - // increase redelivery count of attempt - else acc + (k -> attempt.incrementRedeliveryCount) + val (attempts, failed) = (redeliveryAttempts ++ as).foldLeft[(DeliveryAttempts, FailedAttempts)]((immutable.SortedMap.empty, Vector.empty)) { + case ((attempts, failed), (k, attempt)) ⇒ + val persistent = attempt.persistent + if (persistent.redeliveries >= redeliverMax) { + (attempts, failed :+ persistent) + } else { + val updated = attempt.incrementRedeliveryCount + context.actorSelection(updated.destination).tell(updated.persistent, updated.sender) + (attempts.updated(k, updated), failed) + + } } - redeliver(attempts) + redeliveryAttempts = attempts scheduleRedelivery() - case c @ Confirm(processorId, messageSequenceNr, _, _, _) ⇒ - attempts -= ((processorId, messageSequenceNr)) + failed.headOption.foreach(_.confirmMessage.channel ! RedeliverFailure(failed)) + case c: Delivered ⇒ + redeliveryAttempts -= c.deliverySequenceNr } override def preStart(): Unit = scheduleRedelivery() override def postStop(): Unit = - schedule.cancel() + redeliverySchedule.cancel() private def scheduleRedelivery(): Unit = - schedule = context.system.scheduler.scheduleOnce(redeliverInterval, context.parent, Redeliver) - - private def redeliver(attempts: DeliveryAttempts): Unit = - attempts.values.toSeq.sortBy(_.deliverySequenceNr).foreach(ad ⇒ ad.destination tell (ad.persistent, ad.sender)) + redeliverySchedule = context.system.scheduler.scheduleOnce(redeliverInterval, context.parent, Redeliver) } diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala index 672e861b77..bdb08dcec5 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala @@ -17,10 +17,6 @@ import akka.persistence.JournalProtocol._ * Event sourcing mixin for a [[Processor]]. */ private[persistence] trait Eventsourced extends Processor { - private trait State { - def aroundReceive(receive: Receive, message: Any): Unit - } - /** * Processor recovery state. 
Waits for recovery completion and then changes to * `processingCommands` @@ -31,8 +27,9 @@ private[persistence] trait Eventsourced extends Processor { def aroundReceive(receive: Receive, message: Any) { Eventsourced.super.aroundReceive(receive, message) message match { - case _: ReplaySuccess | _: ReplayFailure ⇒ currentState = processingCommands - case _ ⇒ + case _: ReadHighestSequenceNrSuccess | _: ReadHighestSequenceNrFailure ⇒ + currentState = processingCommands + case _ ⇒ } } } @@ -48,7 +45,7 @@ private[persistence] trait Eventsourced extends Processor { override def toString: String = "processing commands" def aroundReceive(receive: Receive, message: Any) { - Eventsourced.super.aroundReceive(receive, LoopSuccess(message)) + Eventsourced.super.aroundReceive(receive, LoopMessageSuccess(message)) if (!persistInvocations.isEmpty) { currentState = persistingEvents Eventsourced.super.aroundReceive(receive, PersistentBatch(persistentEventBatch.reverse)) @@ -75,15 +72,15 @@ private[persistence] trait Eventsourced extends Processor { case p: PersistentRepr ⇒ deleteMessage(p.sequenceNr, true) throw new UnsupportedOperationException("Persistent commands not supported") - case WriteSuccess(p) ⇒ + case WriteMessageSuccess(p) ⇒ withCurrentPersistent(p)(p ⇒ persistInvocations.head._2(p.payload)) onWriteComplete() - case e @ WriteFailure(p, _) ⇒ + case e @ WriteMessageFailure(p, _) ⇒ Eventsourced.super.aroundReceive(receive, message) // stops actor by default onWriteComplete() - case s @ WriteBatchSuccess ⇒ Eventsourced.super.aroundReceive(receive, s) - case f: WriteBatchFailure ⇒ Eventsourced.super.aroundReceive(receive, f) - case other ⇒ processorStash.stash() + case s @ WriteMessagesSuccess ⇒ Eventsourced.super.aroundReceive(receive, s) + case f: WriteMessagesFailure ⇒ Eventsourced.super.aroundReceive(receive, f) + case other ⇒ processorStash.stash() } def onWriteComplete(): Unit = { diff --git a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala index d0626ff221..5b5b23e500 100644 --- a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala @@ -13,117 +13,155 @@ import akka.persistence.serialization.Message /** * INTERNAL API. * - * Defines messages exchanged between processors, channels and a journal. + * Messages exchanged between processors, views, channels and a journal. */ private[persistence] object JournalProtocol { /** - * Instructs a journal to delete all persistent messages with sequence numbers in - * the range from `fromSequenceNr` to `toSequenceNr` (both inclusive). If `permanent` - * is set to `false`, the persistent messages are marked as deleted in the journal, - * otherwise they are permanently deleted from the journal. + * Request to delete messages identified by `messageIds`. If `permanent` is set to `false`, + * the persistent messages are marked as deleted, otherwise they are permanently deleted. */ - case class Delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) + case class DeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean, requestor: Option[ActorRef] = None) /** - * Message sent after confirming the receipt of a [[ConfirmablePersistent]] message. + * Reply message to a successful [[DeleteMessages]] request. 
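+   *
+   * For example, the deletion batching layer of a persistent channel issues the request and
+   * receives this reply (a sketch based on [[DeliveredByPersistentChannelBatching]]):
+   *
+   * {{{
+   * journal ! DeleteMessages(batch, true, Some(self))
+   * // the journal replies with DeleteMessagesSuccess(messageIds) on completion
+   * }}}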
+   */
+  case class DeleteMessagesSuccess(messageIds: immutable.Seq[PersistentId])
+
+  /**
+   * Reply message to a failed [[DeleteMessages]] request.
+   */
+  case class DeleteMessagesFailure(cause: Throwable)
+
+  /**
+   * Request to delete all persistent messages with sequence numbers up to `toSequenceNr`
+   * (inclusive). If `permanent` is set to `false`, the persistent messages are marked
+   * as deleted in the journal, otherwise they are permanently deleted from the journal.
+   */
+  case class DeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean)
+
+  /**
+   * Request to write delivery confirmations.
+   */
+  case class WriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation], requestor: ActorRef)
+
+  /**
+   * Reply message to a successful [[WriteConfirmations]] request.
+   */
+  case class WriteConfirmationsSuccess(confirmations: immutable.Seq[PersistentConfirmation])
+
+  /**
+   * Reply message to a failed [[WriteConfirmations]] request.
+   */
+  case class WriteConfirmationsFailure(cause: Throwable)
+
+  /**
+   * Request to write messages.
    *
-   * @param processorId id of the processor that sent the message corresponding to
-   *                    this confirmation to a channel.
-   * @param messageSequenceNr sequence number of the sent message.
-   * @param channelId id of the channel that delivered the message corresponding to
-   *                  this confirmation.
-   * @param wrapperSequenceNr sequence number of the message stored by a persistent
-   *                          channel. This message contains the [[Deliver]] request
-   *                          with the message identified by `processorId` and
-   *                          `messageSequenceNumber`.
-   * @param channelEndpoint actor reference that sent the message corresponding to
-   *                        this confirmation. This is a child actor of the sending
-   *                        [[Channel]] or [[PersistentChannel]].
+   * @param messages messages to be written.
+   * @param processor write requestor.
    */
-  case class Confirm(processorId: String, messageSequenceNr: Long, channelId: String, wrapperSequenceNr: Long = 0L, channelEndpoint: ActorRef = null) extends Message

  /**
-   * Instructs a journal to persist a sequence of messages.
-   *
-   * @param persistentBatch batch of messages to be persisted.
-   * @param processor requesting processor.
+   * Reply message to a successful [[WriteMessages]] request. This reply is sent to the requestor
+   * before all subsequent [[WriteMessageSuccess]] replies.
    */
-  case class WriteBatch(persistentBatch: immutable.Seq[PersistentRepr], processor: ActorRef)
+  case class WriteMessages(messages: immutable.Seq[PersistentRepr], processor: ActorRef)
+  case object WriteMessagesSuccess

  /**
-   * Reply message to a processor if a batch write succeeded. This message is received before
-   * all subsequent [[WriteSuccess]] messages.
-   */
-  case object WriteBatchSuccess
-
-  /**
-   * Reply message to a processor if a batch write failed. This message is received before
-   * all subsequent [[WriteFailure]] messages.
+   * Reply message to a failed [[WriteMessages]] request. This reply is sent to the requestor
+   * before all subsequent [[WriteMessageFailure]] replies.
    *
    * @param cause failure cause.
    */
-  case class WriteBatchFailure(cause: Throwable)
+  case class WriteMessagesFailure(cause: Throwable)

  /**
-   * Reply message to a processor that `persistent` message has been successfully journaled.
+   * Reply message to a successful [[WriteMessages]] request. For each contained [[PersistentRepr]] message
+   * in the request, a separate reply is sent to the requestor.
    *
-   * @param persistent persistent message.
+   * @param persistent successfully written message.
    */
-  case class WriteSuccess(persistent: PersistentRepr)
+  case class WriteMessageSuccess(persistent: PersistentRepr)

  /**
-   * Reply message to a processor that `persistent` message could not be journaled.
+   * Reply message to a failed [[WriteMessages]] request. For each contained [[PersistentRepr]] message
+   * in the request, a separate reply is sent to the requestor.
    *
-   * @param persistent persistent message.
+   * @param message message that failed to be written.
    * @param cause failure cause.
    */
-  case class WriteFailure(persistent: PersistentRepr, cause: Throwable)
+  case class WriteMessageFailure(message: PersistentRepr, cause: Throwable)

  /**
-   * Instructs a journal to loop a `message` back to `processor`, without persisting the
-   * message. Looping of messages through a journal is required to preserve message order
-   * with persistent messages.
+   * Request to loop a `message` back to `processor`, without persisting the message. Looping of messages
+   * through a journal is required to preserve message order with persistent messages.
    *
    * @param message message to be looped through the journal.
-   * @param processor requesting processor.
+   * @param processor loop requestor.
    */
-  case class Loop(message: Any, processor: ActorRef)
+  case class LoopMessage(message: Any, processor: ActorRef)

  /**
-   * Reply message to a processor that a `message` has been looped through the journal.
+   * Reply message to a [[LoopMessage]] request.
    *
    * @param message looped message.
    */
-  case class LoopSuccess(message: Any)
+  case class LoopMessageSuccess(message: Any)

  /**
-   * Instructs a journal to replay messages to `processor`.
+   * Request to replay messages to `processor`.
    *
-   * @param fromSequenceNr sequence number where replay should start.
+   * @param fromSequenceNr sequence number where replay should start (inclusive).
    * @param toSequenceNr sequence number where replay should end (inclusive).
+   * @param max maximum number of messages to be replayed.
+   * @param processorId requesting processor id.
+   * @param processor requesting processor.
+   * @param replayDeleted `true` if messages marked as deleted shall be replayed.
+   */
+  case class ReplayMessages(fromSequenceNr: Long, toSequenceNr: Long, max: Long, processorId: String, processor: ActorRef, replayDeleted: Boolean = false)
+
+  /**
+   * Reply message to a [[ReplayMessages]] request. A separate reply is sent to the requestor for each
+   * replayed message.
+   *
+   * @param persistent replayed message.
+   */
+  case class ReplayedMessage(persistent: PersistentRepr)
+
+  /**
+   * Reply message to a successful [[ReplayMessages]] request. This reply is sent to the requestor
+   * after all [[ReplayedMessage]] replies have been sent (if any).
+   */
+  case object ReplayMessagesSuccess
+
+  /**
+   * Reply message to a failed [[ReplayMessages]] request. This reply is sent to the requestor
+   * if a replay could not be successfully completed.
+   */
+  case class ReplayMessagesFailure(cause: Throwable)
+
+  /**
+   * Request to read the highest stored sequence number of a given processor.
+   *
+   * @param fromSequenceNr optional hint where to start searching for the highest sequence number.
    * @param processorId requesting processor id.
    * @param processor requesting processor.
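+   *
+   * For example, a processor requests the highest sequence number from the journal after
+   * replay has completed (as in [[Processor]]; shown here as a sketch):
+   *
+   * {{{
+   * journal ! ReadHighestSequenceNr(lastSequenceNr, processorId, self)
+   * }}}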
*/ - case class Replay(fromSequenceNr: Long, toSequenceNr: Long, processorId: String, processor: ActorRef) + case class ReadHighestSequenceNr(fromSequenceNr: Long = 1L, processorId: String, processor: ActorRef) /** - * Reply message to a processor that `persistent` message has been replayed. + * Reply message to a successful [[ReadHighestSequenceNr]] request. * - * @param persistent persistent message. + * @param highestSequenceNr read highest sequence number. */ - case class Replayed(persistent: PersistentRepr) + case class ReadHighestSequenceNrSuccess(highestSequenceNr: Long) /** - * Reply message to a processor that all `persistent` messages have been replayed. + * Reply message to a failed [[ReadHighestSequenceNr]] request. * - * @param maxSequenceNr the highest stored sequence number (for a processor). + * @param cause failure cause. */ - case class ReplaySuccess(maxSequenceNr: Long) - - /** - * Reply message to a processor that not all `persistent` messages could have been - * replayed. - */ - case class ReplayFailure(cause: Throwable) + case class ReadHighestSequenceNrFailure(cause: Throwable) } diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala index 753a48baf9..ced7a67dd5 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala @@ -4,9 +4,62 @@ package akka.persistence +import scala.concurrent.duration._ + +import com.typesafe.config.Config + import akka.actor._ import akka.dispatch.Dispatchers import akka.persistence.journal.AsyncWriteJournal +import akka.util.Helpers.ConfigOps + +/** + * Persistence configuration. + */ +final class PersistenceSettings(config: Config) { + object journal { + val maxMessageBatchSize: Int = + config.getInt("journal.max-message-batch-size") + + val maxConfirmationBatchSize: Int = + config.getInt("journal.max-confirmation-batch-size") + + val maxDeletionBatchSize: Int = + config.getInt("journal.max-deletion-batch-size") + } + + object view { + val autoUpdate: Boolean = + config.getBoolean("view.auto-update") + + val autoUpdateInterval: FiniteDuration = + config.getMillisDuration("view.auto-update-interval") + + val autoUpdateReplayMax: Long = + posMax(config.getLong("view.auto-update-replay-max")) + + private def posMax(v: Long) = + if (v < 0) Long.MaxValue else v + } + + /** + * INTERNAL API. + * + * These config options are only used internally for testing + * purposes and are therefore not defined in reference.conf + */ + private[persistence] object internal { + val publishPluginCommands: Boolean = { + val path = "publish-plugin-commands" + config.hasPath(path) && config.getBoolean(path) + } + + val publishConfirmations: Boolean = { + val path = "publish-confirmations" + config.hasPath(path) && config.getBoolean(path) + } + } +} /** * Persistence extension. 
@@ -27,27 +80,34 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { */ class Persistence(val system: ExtendedActorSystem) extends Extension { private val DefaultPluginDispatcherId = "akka.persistence.dispatchers.default-plugin-dispatcher" - private val config = system.settings.config.getConfig("akka.persistence") - private val snapshotStore = createPlugin("snapshot-store", _ ⇒ DefaultPluginDispatcherId) - private val journal = createPlugin("journal", clazz ⇒ - if (classOf[AsyncWriteJournal].isAssignableFrom(clazz)) Dispatchers.DefaultDispatcherId else DefaultPluginDispatcherId) - /** - * INTERNAL API. - */ - private[persistence] val publishPluginCommands: Boolean = { - val path = "publish-plugin-commands" - // this config option is only used internally (for testing - // purposes) and is therefore not defined in reference.conf - config.hasPath(path) && config.getBoolean(path) + val settings = new PersistenceSettings(config) + + private val snapshotStore = createPlugin("snapshot-store") { _ ⇒ + DefaultPluginDispatcherId } + private val journal = createPlugin("journal") { clazz ⇒ + if (classOf[AsyncWriteJournal].isAssignableFrom(clazz)) Dispatchers.DefaultDispatcherId + else DefaultPluginDispatcherId + } + + private val confirmationBatchLayer = system.asInstanceOf[ActorSystemImpl] + .systemActorOf(Props(classOf[DeliveredByChannelBatching], journal, settings), "confirmation-batch-layer") + + private val deletionBatchLayer = system.asInstanceOf[ActorSystemImpl] + .systemActorOf(Props(classOf[DeliveredByPersistentChannelBatching], journal, settings), "deletion-batch-layer") + /** - * INTERNAL API. + * Creates a canonical processor id from a processor actor ref. */ - private[persistence] val maxBatchSize: Int = - config.getInt("journal.max-batch-size") + def processorId(processor: ActorRef): String = id(processor) + + /** + * Creates a canonical channel id from a channel actor ref. + */ + def channelId(channel: ActorRef): String = id(channel) /** * Returns a snapshot store for a processor identified by `processorId`. @@ -68,16 +128,18 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { } /** - * Creates a canonical processor id from a processor actor ref. + * INTERNAL API. */ - def processorId(processor: ActorRef): String = id(processor) + private[persistence] def confirmationBatchingJournalForChannel(channelId: String): ActorRef = + confirmationBatchLayer /** - * Creates a canonical channel id from a channel actor ref. + * INTERNAL API. 
*/ - def channelId(channel: ActorRef): String = id(channel) + private[persistence] def deletionBatchingJournalForChannel(channelId: String): ActorRef = + deletionBatchLayer - private def createPlugin(pluginType: String, dispatcherSelector: Class[_] ⇒ String) = { + private def createPlugin(pluginType: String)(dispatcherSelector: Class[_] ⇒ String) = { val pluginConfigPath = config.getString(s"${pluginType}.plugin") val pluginConfig = system.settings.config.getConfig(pluginConfigPath) val pluginClassName = pluginConfig.getString("class") diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala index 9dfe757730..6abf10ca2d 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala @@ -12,7 +12,6 @@ import scala.collection.immutable import akka.actor.{ ActorContext, ActorRef } import akka.japi.Util.immutableSeq import akka.pattern.PromiseActorRef -import akka.persistence.JournalProtocol.Confirm import akka.persistence.serialization.Message /** @@ -115,14 +114,43 @@ case class PersistentBatch(persistentBatch: immutable.Seq[Persistent]) extends M persistentBatch.toList.asInstanceOf[List[PersistentRepr]] } +/** + * Plugin API: confirmation entry written by journal plugins. + */ +trait PersistentConfirmation { + def processorId: String + def channelId: String + def sequenceNr: Long +} + +/** + * Plugin API: persistent message identifier. + */ +trait PersistentId { + /** + * Id of processor that journals a persistent message + */ + def processorId: String + + /** + * A persistent message's sequence number. + */ + def sequenceNr: Long +} + +/** + * INTERNAL API. + */ +private[persistence] case class PersistentIdImpl(processorId: String, sequenceNr: Long) extends PersistentId + /** * Plugin API: representation of a persistent message in the journal plugin API. * - * @see[[SyncWriteJournal]] - * @see[[AsyncWriteJournal]] - * @see[[AsyncReplay]] + * @see [[journal.SyncWriteJournal]] + * @see [[journal.AsyncWriteJournal]] + * @see [[journal.AsyncRecovery]] */ -trait PersistentRepr extends Persistent with Message { +trait PersistentRepr extends Persistent with PersistentId with Message { import scala.collection.JavaConverters._ /** @@ -130,28 +158,11 @@ trait PersistentRepr extends Persistent with Message { */ def payload: Any - /** - * This persistent message's seuence number. - */ - def sequenceNr: Long - - /** - * Id of processor that journals the message - */ - def processorId: String - /** * `true` if this message is marked as deleted. */ def deleted: Boolean - /** - * `true` by default, `false` for replayed messages. Set to `true` by a channel if this - * message is replayed and its sender reference was resolved. Channels use this field to - * avoid redundant sender reference resolutions. - */ - def resolved: Boolean - /** * Number of redeliveries. Only greater than zero if message has been redelivered by a [[Channel]] * or [[PersistentChannel]]. @@ -178,7 +189,7 @@ trait PersistentRepr extends Persistent with Message { /** * Delivery confirmation message. */ - def confirmMessage: Confirm + def confirmMessage: Delivered /** * Delivery confirmation message. @@ -202,16 +213,15 @@ trait PersistentRepr extends Persistent with Message { prepareWrite(if (sender.isInstanceOf[PromiseActorRef]) context.system.deadLetters else sender) /** - * INTERNAL API. + * Creates a new copy of this [[PersistentRepr]]. 
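+   *
+   * For example, the redelivery process increments the redelivery counter of a message with
+   * (a sketch based on [[ReliableDelivery]]):
+   *
+   * {{{
+   * persistent.update(redeliveries = persistent.redeliveries + 1)
+   * }}}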
*/ - private[persistence] def update( + def update( sequenceNr: Long = sequenceNr, processorId: String = processorId, deleted: Boolean = deleted, - resolved: Boolean = resolved, redeliveries: Int = redeliveries, confirms: immutable.Seq[String] = confirms, - confirmMessage: Confirm = confirmMessage, + confirmMessage: Delivered = confirmMessage, confirmTarget: ActorRef = confirmTarget, sender: ActorRef = sender): PersistentRepr } @@ -230,14 +240,13 @@ object PersistentRepr { sequenceNr: Long = 0L, processorId: String = PersistentRepr.Undefined, deleted: Boolean = false, - resolved: Boolean = true, redeliveries: Int = 0, confirms: immutable.Seq[String] = Nil, confirmable: Boolean = false, - confirmMessage: Confirm = null, + confirmMessage: Delivered = null, confirmTarget: ActorRef = null, sender: ActorRef = null) = - if (confirmable) ConfirmablePersistentImpl(payload, sequenceNr, processorId, deleted, resolved, redeliveries, confirms, confirmMessage, confirmTarget, sender) + if (confirmable) ConfirmablePersistentImpl(payload, sequenceNr, processorId, deleted, redeliveries, confirms, confirmMessage, confirmTarget, sender) else PersistentImpl(payload, sequenceNr, processorId, deleted, confirms, sender) /** @@ -275,18 +284,16 @@ private[persistence] case class PersistentImpl( sequenceNr: Long, processorId: String, deleted: Boolean, - resolved: Boolean, redeliveries: Int, confirms: immutable.Seq[String], - confirmMessage: Confirm, + confirmMessage: Delivered, confirmTarget: ActorRef, sender: ActorRef) = copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, confirms = confirms, sender = sender) - val resolved: Boolean = false val redeliveries: Int = 0 val confirmable: Boolean = false - val confirmMessage: Confirm = null + val confirmMessage: Delivered = null val confirmTarget: ActorRef = null } @@ -298,10 +305,9 @@ private[persistence] case class ConfirmablePersistentImpl( sequenceNr: Long, processorId: String, deleted: Boolean, - resolved: Boolean, redeliveries: Int, confirms: immutable.Seq[String], - confirmMessage: Confirm, + confirmMessage: Delivered, confirmTarget: ActorRef, sender: ActorRef) extends ConfirmablePersistent with PersistentRepr { @@ -314,16 +320,16 @@ private[persistence] case class ConfirmablePersistentImpl( def confirmable = true def prepareWrite(sender: ActorRef) = - copy(sender = sender, resolved = false, confirmMessage = null, confirmTarget = null) + copy(sender = sender, confirmMessage = null, confirmTarget = null) - def update(sequenceNr: Long, processorId: String, deleted: Boolean, resolved: Boolean, redeliveries: Int, confirms: immutable.Seq[String], confirmMessage: Confirm, confirmTarget: ActorRef, sender: ActorRef) = - copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, resolved = resolved, redeliveries = redeliveries, confirms = confirms, confirmMessage = confirmMessage, confirmTarget = confirmTarget, sender = sender) + def update(sequenceNr: Long, processorId: String, deleted: Boolean, redeliveries: Int, confirms: immutable.Seq[String], confirmMessage: Delivered, confirmTarget: ActorRef, sender: ActorRef) = + copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, redeliveries = redeliveries, confirms = confirms, confirmMessage = confirmMessage, confirmTarget = confirmTarget, sender = sender) } /** * INTERNAL API. 
 */
private[persistence] object ConfirmablePersistentImpl {
-  def apply(persistent: PersistentRepr, confirmMessage: Confirm, confirmTarget: ActorRef = null): ConfirmablePersistentImpl =
-    ConfirmablePersistentImpl(persistent.payload, persistent.sequenceNr, persistent.processorId, persistent.deleted, persistent.resolved, persistent.redeliveries, persistent.confirms, confirmMessage, confirmTarget, persistent.sender)
+  def apply(persistent: PersistentRepr, confirmMessage: Delivered, confirmTarget: ActorRef = null): ConfirmablePersistentImpl =
+    ConfirmablePersistentImpl(persistent.payload, persistent.sequenceNr, persistent.processorId, persistent.deleted, persistent.redeliveries, persistent.confirms, confirmMessage, confirmTarget, persistent.sender)
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistentChannel.scala b/akka-persistence/src/main/scala/akka/persistence/PersistentChannel.scala
index 1742908df6..307e4ec552 100644
--- a/akka-persistence/src/main/scala/akka/persistence/PersistentChannel.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/PersistentChannel.scala
@@ -9,82 +9,120 @@ import scala.language.postfixOps

 import akka.AkkaException
 import akka.actor._
-
-import akka.persistence.JournalProtocol.Confirm
+import akka.persistence.JournalProtocol._

 /**
  * A [[PersistentChannel]] configuration object.
  *
- * @param redeliverMax maximum number of redeliveries (default is 5).
- * @param redeliverInterval interval between redeliveries (default is 5 seconds).
- * @param replyPersistent if `true` the sender will receive the successfully stored [[Persistent]]
- *                        message that has been submitted with a [[Deliver]] request, or a
- *                        [[PersistenceFailure]] message in case of a persistence failure.
+ * @param redeliverMax Maximum number of redelivery attempts.
+ * @param redeliverInterval Interval between redelivery attempts.
+ * @param redeliverFailureListener Receiver of [[RedeliverFailure]] notifications, which are sent when the number
+ *                                 of redeliveries reaches `redeliverMax` for a sequence of messages. To enforce
+ *                                 a redelivery of these messages, the listener has to send [[Reset]] to the
+ *                                 persistent channel. Alternatively, it can also confirm these messages,
+ *                                 preventing further redeliveries.
+ * @param replyPersistent If `true` the sender will receive the successfully stored [[Persistent]] message that has
+ *                        been submitted with a [[Deliver]] request, or a [[PersistenceFailure]] message in case of
+ *                        a persistence failure.
+ * @param pendingConfirmationsMax Message delivery is suspended by a channel if the number of pending confirmations
+ *                                reaches the specified value and is resumed again if the number of pending
+ *                                confirmations falls below `pendingConfirmationsMin`.
+ * @param pendingConfirmationsMin Message delivery is resumed again if the number of pending confirmations falls
+ *                                below this limit. It is suspended again if it reaches `pendingConfirmationsMax`.
+ * @param idleTimeout Maximum interval between read attempts made by a persistent channel. This setting applies,
+ *                    for example, after a journal failed to serve a read request. The next read request is then
+ *                    made after the configured timeout.
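+ *
+ * Example (values are illustrative):
+ *
+ * {{{
+ * val settings = PersistentChannelSettings(
+ *   redeliverMax = 15,
+ *   pendingConfirmationsMax = 10000,
+ *   pendingConfirmationsMin = 2000)
+ * }}}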
 */
-class PersistentChannelSettings(
-  redeliverMax: Int,
-  redeliverInterval: FiniteDuration,
-  val replyPersistent: Boolean) extends ChannelSettings(redeliverMax, redeliverInterval) {
+@SerialVersionUID(1L)
+case class PersistentChannelSettings(
+  val redeliverMax: Int = 5,
+  val redeliverInterval: FiniteDuration = 5.seconds,
+  val redeliverFailureListener: Option[ActorRef] = None,
+  val replyPersistent: Boolean = false,
+  val pendingConfirmationsMax: Long = Long.MaxValue,
+  val pendingConfirmationsMin: Long = Long.MaxValue,
+  val idleTimeout: FiniteDuration = 1.minute) {

  /**
   * Java API.
   */
-  override def withRedeliverMax(redeliverMax: Int): PersistentChannelSettings =
-    updatePersistent(redeliverMax = redeliverMax)
+  def withRedeliverMax(redeliverMax: Int): PersistentChannelSettings =
+    copy(redeliverMax = redeliverMax)

  /**
   * Java API.
   */
-  override def withRedeliverInterval(redeliverInterval: FiniteDuration): PersistentChannelSettings =
-    updatePersistent(redeliverInterval = redeliverInterval)
+  def withRedeliverInterval(redeliverInterval: FiniteDuration): PersistentChannelSettings =
+    copy(redeliverInterval = redeliverInterval)

  /**
   * Java API.
   */
-  def withReplyPersistent(replayPersistent: Boolean) =
-    updatePersistent(replyPersistent = replyPersistent)
+  def withRedeliverFailureListener(redeliverFailureListener: ActorRef): PersistentChannelSettings =
+    copy(redeliverFailureListener = Option(redeliverFailureListener))

-  private def updatePersistent( // compile error if method name is 'update'
-    redeliverMax: Int = redeliverMax,
-    redeliverInterval: FiniteDuration = redeliverInterval,
-    replyPersistent: Boolean = replyPersistent): PersistentChannelSettings =
-    new PersistentChannelSettings(redeliverMax, redeliverInterval, replyPersistent)
+  /**
+   * Java API.
+   */
+  def withReplyPersistent(replyPersistent: Boolean): PersistentChannelSettings =
+    copy(replyPersistent = replyPersistent)
+
+  /**
+   * Java API.
+   */
+  def withPendingConfirmationsMax(pendingConfirmationsMax: Long): PersistentChannelSettings =
+    copy(pendingConfirmationsMax = pendingConfirmationsMax)
+
+  /**
+   * Java API.
+   */
+  def withPendingConfirmationsMin(pendingConfirmationsMin: Long): PersistentChannelSettings =
+    copy(pendingConfirmationsMin = pendingConfirmationsMin)
+
+  /**
+   * Converts this configuration object to [[ChannelSettings]].
+   */
+  def toChannelSettings: ChannelSettings =
+    ChannelSettings(redeliverMax, redeliverInterval, redeliverFailureListener)
}

object PersistentChannelSettings {
-  def apply(
-    redeliverMax: Int = 5,
-    redeliverInterval: FiniteDuration = 5 seconds,
-    replyPersistent: Boolean = false): PersistentChannelSettings =
-    new PersistentChannelSettings(redeliverMax, redeliverInterval, replyPersistent)
-
  /**
   * Java API.
   */
-  def create() = apply()
+  def create() = PersistentChannelSettings.apply()
}

/**
- * A [[PersistentChannel]] implements the same functionality as a [[Channel]] but additionally
- * persists messages before they are delivered. This is done by using internally a special-purpose
- * [[Processor]]. Therefore, the main use case of a persistent channel is standalone usage i.e.
- * independent of an application-specific [[Processor]] sending messages to a channel. Messages
- * that have been persisted by a persistent channel are deleted when destinations confirm the
- * receipt of these messages.
+ * Resets a [[PersistentChannel]], forcing it to redeliver all unconfirmed persistent
+ * messages. This does not affect the writing of new [[Deliver]] requests.
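+ *
+ * For example (assuming `channel` references a persistent channel):
+ *
+ * {{{
+ * channel ! Reset
+ * }}}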
+ */
+case object Reset
+
+/**
+ * Exception thrown by a [[PersistentChannel]] child actor to re-initiate delivery.
+ */
+class ResetException extends AkkaException("Channel reset on application request")
+
+/**
+ * A [[PersistentChannel]] implements the same functionality as a [[Channel]] but additionally persists
+ * [[Deliver]] requests before they are served. Persistent channels are useful in combination with slow
+ * destinations or destinations that are unavailable for a long time. `Deliver` requests that have been
+ * persisted by a persistent channel are deleted when destinations confirm the receipt of the corresponding
+ * messages.
 *
- * Using a persistent channel in combination with a [[Processor]] can make sense if destinations
- * are unavailable for a long time and an application doesn't want to buffer all messages in
- * memory (but write them to the journal instead). In this case, delivery can be disabled with
- * [[DisableDelivery]] (to stop delivery and persist-only) and re-enabled with [[EnableDelivery]].
- * `EnableDelivery` replays persistent messages to this channel and the channel delivers all
- * unconfirmed messages again (which may then show up as duplicates at destinations as described
- * in the API docs of [[Channel]]. Duplicates can be detected by tracking message sequence numbers
- * and redelivery counters).
+ * The number of pending confirmations can be limited by a persistent channel based on the parameters of
+ * [[PersistentChannelSettings]]. It can suspend delivery when the number of pending confirmations reaches
+ * `pendingConfirmationsMax` and resume delivery again when this number falls below `pendingConfirmationsMin`.
+ * This prevents both flooding destinations with more messages than they can process and unlimited memory
+ * consumption by the channel. A persistent channel continues to persist [[Deliver]] requests even while
+ * message delivery is temporarily suspended.
 *
- * A persistent channel can also reply to [[Deliver]] senders whether persisting a message was
- * successful or not (see `replyPersistent` of [[PersistentChannelSettings]]). If enabled, the
- * sender will receive the persisted message as reply (i.e. a [[Persistent]] message), otherwise
- * a [[PersistenceFailure]] message.
+ * A persistent channel can also reply to [[Deliver]] senders whether or not the request has been successfully
+ * persisted (see `replyPersistent` parameter in [[PersistentChannelSettings]]). In case of success, the channel
+ * replies with the contained [[Persistent]] message, otherwise with a [[PersistenceFailure]] message.
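+ *
+ * Example usage, assuming the `props` factory mirrors that of [[Channel]] (the channel name and
+ * `destination` are illustrative):
+ *
+ * {{{
+ * val channel = context.actorOf(PersistentChannel.props(
+ *   PersistentChannelSettings(replyPersistent = true)), "myChannel")
+ * channel ! Deliver(Persistent("example"), destination.path)
+ * }}}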
 */
final class PersistentChannel private[akka] (_channelId: Option[String], channelSettings: PersistentChannelSettings) extends Actor {
  private val id = _channelId match {
@@ -92,19 +130,17 @@ final class PersistentChannel private[akka] (_channelId: Option[String], channel
    case None ⇒ Persistence(context.system).channelId(self)
  }

-  private val reliableDelivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings))
-  private val resolvedDelivery = context.actorOf(Props(classOf[ResolvedDelivery], reliableDelivery))
-  private val reliableStorage = context.actorOf(Props(classOf[ReliableStorage], id, channelSettings, resolvedDelivery))
+  private val requestReader = context.actorOf(Props(classOf[RequestReader], id, channelSettings))
+  private val requestWriter = context.actorOf(Props(classOf[RequestWriter], id, channelSettings, requestReader))

  def receive = {
-    case d @ Deliver(persistent: PersistentRepr, destination, resolve) ⇒
+    case d @ Deliver(persistent: PersistentRepr, destination) ⇒
-      // Persist the Deliver request by sending reliableStorage a Persistent message
+      // Persist the Deliver request by sending requestWriter a Persistent message
      // with the Deliver request as payload. This persistent message is referred to
      // as the wrapper message, whereas the persistent message contained in the Deliver
-      // request is referred to as wrapped message (see also class ReliableStorage).
+      // request is referred to as wrapped message (see also [[RequestWriter]]).
-      if (!persistent.confirms.contains(id)) reliableStorage forward Persistent(d)
-    case DisableDelivery ⇒ reliableStorage ! DisableDelivery
-    case EnableDelivery  ⇒ reliableStorage ! EnableDelivery
+      if (!persistent.confirms.contains(id)) requestWriter forward Persistent(d)
+    case Reset ⇒ requestReader ! Reset
  }
}
@@ -145,70 +181,192 @@ object PersistentChannel {
}

/**
- * Instructs a [[PersistentChannel]] to disable the delivery of [[Persistent]] messages to their destination.
- * The persistent channel, however, continues to persist messages (for later delivery).
- *
- * @see [[EnableDelivery]]
+ * Plugin API.
 */
-@SerialVersionUID(1L)
-case object DisableDelivery {
-  /**
-   * Java API.
-   */
-  def getInstance = this
+case class DeliveredByPersistentChannel(
+  channelId: String,
+  persistentSequenceNr: Long,
+  deliverySequenceNr: Long = 0L,
+  channel: ActorRef = null) extends Delivered with PersistentId {
+
+  def processorId: String = channelId
+  def sequenceNr: Long = persistentSequenceNr
+  def update(deliverySequenceNr: Long, channel: ActorRef): DeliveredByPersistentChannel =
+    copy(deliverySequenceNr = deliverySequenceNr, channel = channel)
}

/**
- * Instructs a [[PersistentChannel]] to re-enable the delivery of [[Persistent]] messages to their destination.
- * This will first deliver all messages that have been stored by a persistent channel for which no confirmation
- * is available yet. New [[Deliver]] requests are processed after all stored messages have been delivered. This
- * request only has an effect if a persistent channel has previously been disabled with [[DisableDelivery]].
- *
- * @see [[DisableDelivery]]
+ * INTERNAL API.
 */
-@SerialVersionUID(1L)
-case object EnableDelivery {
-  /**
-   * Java API.
-   */
-  def getInstance = this
+private[persistence] class DeliveredByPersistentChannelBatching(journal: ActorRef, settings: PersistenceSettings) extends Actor {
+  private val publish = settings.internal.publishConfirmations
+  private val batchMax = settings.journal.maxConfirmationBatchSize
+
+  private var batching = false
+  private var batch = Vector.empty[DeliveredByPersistentChannel]
+
+  def receive = {
+    case DeleteMessagesSuccess(messageIds) ⇒
+      if (batch.isEmpty) batching = false else journalBatch()
+      messageIds.foreach {
+        case c: DeliveredByPersistentChannel ⇒
+          c.channel ! c
+          if (publish) context.system.eventStream.publish(c)
+      }
+    case DeleteMessagesFailure(_) ⇒
+      if (batch.isEmpty) batching = false else journalBatch()
+    case d: DeliveredByPersistentChannel ⇒
+      addToBatch(d)
+      if (!batching || maxBatchSizeReached) journalBatch()
+    case m ⇒ journal forward m
+  }
+
+  def addToBatch(pc: DeliveredByPersistentChannel): Unit =
+    batch = batch :+ pc
+
+  def maxBatchSizeReached: Boolean =
+    batch.length >= batchMax
+
+  def journalBatch(): Unit = {
+    journal ! DeleteMessages(batch, true, Some(self))
+    batch = Vector.empty
+    batching = true
+  }
}

/**
- * Thrown by a persistent channel when [[EnableDelivery]] has been requested and delivery has been previously
- * disabled for that channel.
+ * Writes [[Deliver]] requests to the journal.
 */
-@SerialVersionUID(1L)
-class ChannelRestartRequiredException extends AkkaException("channel restart required for enabling delivery")
-
-private class ReliableStorage(channelId: String, channelSettings: PersistentChannelSettings, next: ActorRef) extends Processor {
+private class RequestWriter(channelId: String, channelSettings: PersistentChannelSettings, reader: ActorRef) extends Processor {
+  import RequestWriter._
  import channelSettings._

+  private val cbJournal = extension.confirmationBatchingJournalForChannel(channelId)
+
  override val processorId = channelId

-  private val journal = Persistence(context.system).journalFor(channelId)
-  private var deliveryEnabled = true
-
  def receive = {
-    case p @ Persistent(d @ Deliver(wrapped: PersistentRepr, destination, resolve), snr) ⇒
-      val wrapper = p.asInstanceOf[PersistentRepr]
-      val prepared = prepareDelivery(wrapped, wrapper)
-
+    case p @ Persistent(Deliver(wrapped: PersistentRepr, _), _) ⇒
      if (!recoveryRunning && wrapped.processorId != PersistentRepr.Undefined)
        // Write a delivery confirmation to the journal so that replayed Deliver
        // requests from a sending processor are not persisted again. Replaying
        // Deliver requests is now the responsibility of this processor.
-        journal ! Confirm(prepared.processorId, prepared.sequenceNr, channelId)
+        cbJournal ! DeliveredByChannel(wrapped.processorId, channelId, wrapped.sequenceNr)

      if (!recoveryRunning && replyPersistent)
-        sender ! prepared
+        sender ! wrapped

-      if (deliveryEnabled)
-        next forward d.copy(prepared)
+    case p: PersistenceFailure ⇒
+      if (replyPersistent) sender ! p
-    case p: PersistenceFailure if (replyPersistent) ⇒ sender ! p
-    case EnableDelivery if (!deliveryEnabled) ⇒ throw new ChannelRestartRequiredException
-    case DisableDelivery ⇒ deliveryEnabled = false
  }

+  override protected[akka] def aroundReceive(receive: Receive, message: Any): Unit = {
+    super.aroundReceive(receive, message)
+    message match {
+      case WriteMessagesSuccess | WriteMessagesFailure(_) ⇒
+        // activate reader after writes have completed to reduce delivery latency
+        reader ! RequestsWritten
+      case _ ⇒
+    }
+  }
+
+  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
+    self !
Recover(replayMax = 0L) + } + + override def preStart(): Unit = { + self ! Recover(replayMax = 0L) + } +} + +private object RequestWriter { + case object RequestsWritten +} + +/** + * Reads [[Deliver]] requests from the journal and processes them. The number of `Deliver` requests + * processed per iteration depends on + * + * - `pendingConfirmationsMax` parameter in [[PersistentChannelSettings]] + * - `pendingConfirmationsMin` parameter in [[PersistentChannelSettings]] and the + * - current number of pending confirmations. + * + * @see [[PersistentChannel]] + */ +private class RequestReader(channelId: String, channelSettings: PersistentChannelSettings) extends Actor with Recovery { + import RequestWriter._ + import channelSettings._ + + private val delivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings.toChannelSettings)) + + private val idle: State = new State { + override def toString: String = "idle" + + def aroundReceive(receive: Receive, message: Any): Unit = message match { + case r: Recover ⇒ // ignore + case other ⇒ process(receive, other) + } + } + + def receive = { + case p @ Persistent(d @ Deliver(wrapped: PersistentRepr, destination), snr) ⇒ + val wrapper = p.asInstanceOf[PersistentRepr] + val prepared = prepareDelivery(wrapped, wrapper) + numReplayed += 1 + numPending += 1 + delivery forward d.copy(prepared) + case d: Delivered ⇒ + delivery forward d + numPending = math.max(numPending - 1L, 0L) + if (numPending == pendingConfirmationsMin) onReadRequest() + case d @ RedeliverFailure(ms) ⇒ + val numPendingPrev = numPending + numPending = math.max(numPending - ms.length, 0L) + if (numPendingPrev > pendingConfirmationsMin && numPending <= pendingConfirmationsMin) onReadRequest() + redeliverFailureListener.foreach(_.tell(d, context.parent)) + case RequestsWritten | ReceiveTimeout ⇒ + if (numPending <= pendingConfirmationsMin) onReadRequest() + case Reset ⇒ throw new ResetException + } + + def onReplaySuccess(receive: Receive, await: Boolean): Unit = { + onReplayComplete() + if (numReplayed > 0 && numPending <= pendingConfirmationsMin) onReadRequest() + numReplayed = 0L + } + + def onReplayFailure(receive: Receive, await: Boolean, cause: Throwable): Unit = { + onReplayComplete() + } + + def processorId: String = + channelId + + def snapshotterId: String = + s"${channelId}-reader" + + private val dbJournal = extension.deletionBatchingJournalForChannel(channelId) + + /** + * Number of delivery requests replayed (read) per iteration. + */ + private var numReplayed = 0L + + /** + * Number of pending confirmations. + */ + private var numPending = 0L + + context.setReceiveTimeout(channelSettings.idleTimeout) + + private def onReplayComplete(): Unit = { + _currentState = idle + receiverStash.unstashAll() + } + + private def onReadRequest(): Unit = if (_currentState == idle) { + _currentState = replayStarted(await = false) + dbJournal ! ReplayMessages(lastSequenceNr + 1L, Long.MaxValue, pendingConfirmationsMax - numPending, processorId, self) } /** @@ -220,12 +378,21 @@ private class ReliableStorage(channelId: String, channelSettings: PersistentChan // otherwise, use sequence number of the wrapped message (that has been generated by // the sending processor). 
val sequenceNr = if (wrapped.sequenceNr == 0L) wrapper.sequenceNr else wrapped.sequenceNr - val resolved = wrapped.resolved && wrapper.asInstanceOf[PersistentRepr].resolved - val updated = wrapped.update(sequenceNr = sequenceNr, resolved = resolved) + val updated = wrapped.update(sequenceNr = sequenceNr) // include the wrapper sequence number in the Confirm message so that the wrapper can // be deleted later when the confirmation arrives. ConfirmablePersistentImpl(updated, - confirmTarget = journal, - confirmMessage = Confirm(updated.processorId, sequenceNr, channelId, wrapper.sequenceNr)) + confirmTarget = dbJournal, + confirmMessage = DeliveredByPersistentChannel(channelId, sequenceNr, channel = self)) + } + + override def preRestart(reason: Throwable, message: Option[Any]): Unit = { + try receiverStash.unstashAll() finally super.preRestart(reason, message) + } + + override def preStart(): Unit = { + super.preStart() + self ! Recover(replayMax = 0L) + self ! RequestsWritten // considers savepoint loaded from snapshot (TODO) } } diff --git a/akka-persistence/src/main/scala/akka/persistence/Processor.scala b/akka-persistence/src/main/scala/akka/persistence/Processor.scala index 48bfb0a53c..b11079bdda 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Processor.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Processor.scala @@ -4,8 +4,7 @@ package akka.persistence -import scala.annotation.tailrec - +import akka.AkkaException import akka.actor._ import akka.dispatch._ @@ -28,7 +27,6 @@ import akka.dispatch._ * processor ! "bar" * }}} * - * * During start and restart, persistent messages are replayed to a processor so that it can recover internal * state from these messages. New messages sent to a processor during recovery do not interfere with replayed * messages, hence applications don't need to wait for a processor to complete its recovery. @@ -53,97 +51,41 @@ import akka.dispatch._ * @see [[Recover]] * @see [[PersistentBatch]] */ -trait Processor extends Actor with Stash with StashFactory { +trait Processor extends Actor with Recovery { import JournalProtocol._ - import SnapshotProtocol._ - - private val extension = Persistence(context.system) - private val _processorId = extension.processorId(self) - - import extension.maxBatchSize /** - * Processor state. + * Processes the highest stored sequence number response from the journal and then switches + * to `processing` state. */ - private trait State { - /** - * State-specific message handler. - */ - def aroundReceive(receive: Actor.Receive, message: Any): Unit + private val initializing = new State { + override def toString: String = "initializing" - protected def process(receive: Actor.Receive, message: Any) = - receive.applyOrElse(message, unhandled) - - protected def processPersistent(receive: Actor.Receive, persistent: Persistent) = - withCurrentPersistent(persistent)(receive.applyOrElse(_, unhandled)) - } - - /** - * Initial state, waits for `Recover` request, then changes to `recoveryStarted`. - */ - private val recoveryPending = new State { - override def toString: String = "recovery pending" - - def aroundReceive(receive: Actor.Receive, message: Any): Unit = message match { - case Recover(fromSnap, toSnr) ⇒ - _currentState = recoveryStarted - snapshotStore ! LoadSnapshot(processorId, fromSnap, toSnr) - case _ ⇒ processorStash.stash() - } - } - - /** - * Processes a loaded snapshot and replayed messages, if any. If processing of the loaded - * snapshot fails, the exception is thrown immediately. 
If processing of a replayed message - * fails, the exception is caught and stored for being thrown later and state is changed to - * `recoveryFailed`. - */ - private val recoveryStarted = new State { - override def toString: String = "recovery started" - - def aroundReceive(receive: Actor.Receive, message: Any) = message match { - case LoadSnapshotResult(sso, toSnr) ⇒ sso match { - case Some(SelectedSnapshot(metadata, snapshot)) ⇒ - process(receive, SnapshotOffer(metadata, snapshot)) - journal ! Replay(metadata.sequenceNr + 1L, toSnr, processorId, self) - case None ⇒ - journal ! Replay(1L, toSnr, processorId, self) - } - case ReplaySuccess(maxSnr) ⇒ - _currentState = recoverySucceeded - _sequenceNr = maxSnr - processorStash.unstashAll() - case ReplayFailure(cause) ⇒ - val notification = RecoveryFailure(cause) - if (receive.isDefinedAt(notification)) process(receive, notification) - else { - val errorMsg = s"Replay failure by journal (processor id = [${processorId}])" - throw new RecoveryFailureException(errorMsg, cause) - } - case Replayed(p) ⇒ try { processPersistent(receive, p) } catch { - case t: Throwable ⇒ - _currentState = recoveryFailed // delay throwing exception to prepareRestart - _recoveryFailureCause = t - _recoveryFailureMessage = currentEnvelope - } - case r: Recover ⇒ // ignore - case _ ⇒ processorStash.stash() + def aroundReceive(receive: Receive, message: Any) = message match { + case ReadHighestSequenceNrSuccess(highest) ⇒ + _currentState = processing + sequenceNr = highest + receiverStash.unstashAll() + case ReadHighestSequenceNrFailure(cause) ⇒ + onRecoveryFailure(receive, cause) + case other ⇒ + receiverStash.stash() } } /** * Journals and processes new messages, both persistent and transient. */ - private val recoverySucceeded = new State { - override def toString: String = "recovery finished" + private val processing = new State { + override def toString: String = "processing" private var batching = false - def aroundReceive(receive: Actor.Receive, message: Any) = message match { - case r: Recover ⇒ // ignore - case Replayed(p) ⇒ processPersistent(receive, p) // can occur after unstash from user stash - case WriteSuccess(p) ⇒ processPersistent(receive, p) - case WriteFailure(p, cause) ⇒ + def aroundReceive(receive: Receive, message: Any) = message match { + case r: Recover ⇒ // ignore + case ReplayedMessage(p) ⇒ processPersistent(receive, p) // can occur after unstash from user stash + case WriteMessageSuccess(p) ⇒ processPersistent(receive, p) + case WriteMessageFailure(p, cause) ⇒ val notification = PersistenceFailure(p.payload, p.sequenceNr, cause) if (receive.isDefinedAt(notification)) process(receive, notification) else { @@ -152,8 +94,8 @@ trait Processor extends Actor with Stash with StashFactory { "To avoid killing processors on persistence failure, a processor must handle PersistenceFailure messages." 
          throw new ActorKilledException(errorMsg)
        }
-      case LoopSuccess(m) ⇒ process(receive, m)
-      case WriteBatchSuccess | WriteBatchFailure(_) ⇒
+      case LoopMessageSuccess(m) ⇒ process(receive, m)
+      case WriteMessagesSuccess | WriteMessagesFailure(_) ⇒
         if (processorBatch.isEmpty) batching = false else journalBatch()
       case p: PersistentRepr ⇒ addToBatch(p)
@@ -166,7 +108,7 @@ trait Processor extends Actor with Stash with StashFactory {
       case m ⇒
         // submit all batched messages before looping this message
         if (processorBatch.isEmpty) batching = false else journalBatch()
-        journal forward Loop(m, self)
+        journal forward LoopMessage(m, self)
     }

     def addToBatch(p: PersistentRepr): Unit =
@@ -176,67 +118,49 @@
       pb.persistentReprList.foreach(addToBatch)

     def maxBatchSizeReached: Boolean =
-      processorBatch.length >= maxBatchSize
+      processorBatch.length >= extension.settings.journal.maxMessageBatchSize

     def journalBatch(): Unit = {
-      journal ! WriteBatch(processorBatch, self)
+      journal ! WriteMessages(processorBatch, self)
       processorBatch = Vector.empty
       batching = true
     }
   }

   /**
-   * Consumes remaining replayed messages and then changes to `prepareRestart`. The
-   * message that caused the exception during replay, is re-added to the mailbox and
-   * re-received in `prepareRestart`.
+   * INTERNAL API.
+   *
+   * Switches to `initializing` state and requests the highest stored sequence number from the journal.
    */
-  private val recoveryFailed = new State {
-    override def toString: String = "recovery failed"
-
-    def aroundReceive(receive: Actor.Receive, message: Any) = message match {
-      case ReplayFailure(_) ⇒
-        replayCompleted()
-        // journal couldn't tell the maximum stored sequence number, hence the next
-        // replay must be a full replay (up to the highest stored sequence number)
-        _lastSequenceNr = Long.MaxValue
-      case ReplaySuccess(_) ⇒ replayCompleted()
-      case Replayed(p)      ⇒ updateLastSequenceNr(p)
-      case r: Recover       ⇒ // ignore
-      case _                ⇒ processorStash.stash()
-    }
-
-    def replayCompleted(): Unit = {
-      _currentState = prepareRestart
-      mailbox.enqueueFirst(self, _recoveryFailureMessage)
-    }
+  private[persistence] def onReplaySuccess(receive: Receive, awaitReplay: Boolean): Unit = {
+    _currentState = initializing
+    journal ! ReadHighestSequenceNr(lastSequenceNr, processorId, self)
   }

   /**
-   * Re-receives the replayed message that causes an exception during replay and throws
-   * that exception.
+   * INTERNAL API.
    */
-  private val prepareRestart = new State {
-    override def toString: String = "prepare restart"
+  private[persistence] def onReplayFailure(receive: Receive, awaitReplay: Boolean, cause: Throwable): Unit =
+    onRecoveryFailure(receive, cause)

-    def aroundReceive(receive: Actor.Receive, message: Any) = message match {
-      case Replayed(_) ⇒ throw _recoveryFailureCause
-      case _           ⇒ // ignore
+  /**
+   * Invokes this processor's behavior with a `RecoveryFailure` message, if handled, otherwise throws a
+   * `RecoveryException`.
+ */ + private def onRecoveryFailure(receive: Receive, cause: Throwable): Unit = { + val notification = RecoveryFailure(cause) + if (receive.isDefinedAt(notification)) { + receive(notification) + } else { + val errorMsg = s"Recovery failure by journal (processor id = [${processorId}])" + throw new RecoveryException(errorMsg, cause) } } + private val _processorId = extension.processorId(self) + private var processorBatch = Vector.empty[PersistentRepr] - - private var _sequenceNr: Long = 0L - private var _lastSequenceNr: Long = 0L - - private var _currentPersistent: Persistent = _ - private var _currentState: State = recoveryPending - - private var _recoveryFailureCause: Throwable = _ - private var _recoveryFailureMessage: Envelope = _ - - private lazy val journal = extension.journalFor(processorId) - private lazy val snapshotStore = extension.snapshotStoreFor(processorId) + private var sequenceNr: Long = 0L /** * Processor id. Defaults to this processor's path and can be overridden. @@ -244,30 +168,21 @@ trait Processor extends Actor with Stash with StashFactory { def processorId: String = _processorId /** - * Highest received sequence number so far or `0L` if this processor hasn't received - * a persistent message yet. Usually equal to the sequence number of `currentPersistentMessage` - * (unless a processor implementation is about to re-order persistent messages using - * `stash()` and `unstash()`). + * Returns `processorId`. */ - def lastSequenceNr: Long = _lastSequenceNr + def snapshotterId: String = processorId /** * Returns `true` if this processor is currently recovering. */ def recoveryRunning: Boolean = - _currentState == recoveryStarted || - _currentState == prepareRestart + _currentState != processing /** * Returns `true` if this processor has successfully finished recovery. */ def recoveryFinished: Boolean = - _currentState == recoverySucceeded - - /** - * Returns the current persistent message if there is one. - */ - implicit def currentPersistentMessage: Option[Persistent] = Option(_currentPersistent) + _currentState == processing /** * Marks a persistent message, identified by `sequenceNr`, as deleted. A message marked as deleted is @@ -289,23 +204,20 @@ trait Processor extends Actor with Stash with StashFactory { * Processors that want to re-receive that persistent message during recovery should not call * this method. * - * Later extensions may also allow a replay of messages that have been marked as deleted which can - * be useful in debugging environments. - * * @param sequenceNr sequence number of the persistent message to be deleted. * @param permanent if `false`, the message is marked as deleted, otherwise it is permanently deleted. */ def deleteMessage(sequenceNr: Long, permanent: Boolean): Unit = { - journal ! Delete(processorId, sequenceNr, sequenceNr, permanent) + journal ! DeleteMessages(List(PersistentIdImpl(processorId, sequenceNr)), permanent) } /** - * Marks all persistent messages with sequence numbers less than or equal `toSequenceNr` as deleted. + * Permanently deletes all persistent messages with sequence numbers less than or equal `toSequenceNr`. * * @param toSequenceNr upper sequence number bound of persistent messages to be deleted. 
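(Usage sketch for the deletion methods around this point, called from inside a processor's behavior; the sequence numbers are illustrative. Note that the single-argument deleteMessages variant now deletes permanently.)

deleteMessage(lastSequenceNr, permanent = false) // mark a single message as deleted
deleteMessages(toSequenceNr = 100L)              // permanently delete all messages up to sequence number 100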
*/ def deleteMessages(toSequenceNr: Long): Unit = { - deleteMessages(toSequenceNr, false) + deleteMessages(toSequenceNr, true) } /** @@ -313,59 +225,11 @@ trait Processor extends Actor with Stash with StashFactory { * is set to `false`, the persistent messages are marked as deleted in the journal, otherwise * they permanently deleted from the journal. * - * Later extensions may also allow a replay of messages that have been marked as deleted which can - * be useful in debugging environments. - * * @param toSequenceNr upper sequence number bound of persistent messages to be deleted. * @param permanent if `false`, the message is marked as deleted, otherwise it is permanently deleted. */ def deleteMessages(toSequenceNr: Long, permanent: Boolean): Unit = { - journal ! Delete(processorId, 1L, toSequenceNr, permanent) - } - - /** - * Saves a `snapshot` of this processor's state. If saving succeeds, this processor will receive a - * [[SaveSnapshotSuccess]] message, otherwise a [[SaveSnapshotFailure]] message. - */ - def saveSnapshot(snapshot: Any): Unit = { - snapshotStore ! SaveSnapshot(SnapshotMetadata(processorId, lastSequenceNr), snapshot) - } - - /** - * Deletes a snapshot identified by `sequenceNr` and `timestamp`. - */ - def deleteSnapshot(sequenceNr: Long, timestamp: Long): Unit = { - snapshotStore ! DeleteSnapshot(SnapshotMetadata(processorId, sequenceNr, timestamp)) - } - - /** - * Deletes all snapshots matching `criteria`. - */ - def deleteSnapshots(criteria: SnapshotSelectionCriteria): Unit = { - snapshotStore ! DeleteSnapshots(processorId, criteria) - } - - /** - * INTERNAL API. - */ - protected[persistence] def withCurrentPersistent(persistent: Persistent)(body: Persistent ⇒ Unit): Unit = try { - _currentPersistent = persistent - updateLastSequenceNr(persistent) - body(persistent) - } finally _currentPersistent = null - - /** - * INTERNAL API. - */ - protected[persistence] def updateLastSequenceNr(persistent: Persistent) { - if (persistent.sequenceNr > _lastSequenceNr) _lastSequenceNr = persistent.sequenceNr - } - - /** - * INTERNAL API. - */ - override protected[akka] def aroundReceive(receive: Actor.Receive, message: Any): Unit = { - _currentState.aroundReceive(receive, message) + journal ! 
DeleteMessagesTo(processorId, toSequenceNr, permanent) } /** @@ -387,15 +251,15 @@ trait Processor extends Actor with Stash with StashFactory { */ final override protected[akka] def aroundPreRestart(reason: Throwable, message: Option[Any]): Unit = { try { - processorStash.prepend(processorBatch.map(p ⇒ Envelope(p, p.sender, context.system))) - processorStash.unstashAll() + receiverStash.prepend(processorBatch.map(p ⇒ Envelope(p, p.sender, context.system))) + receiverStash.unstashAll() unstashAll(unstashFilterPredicate) } finally { message match { - case Some(WriteSuccess(m)) ⇒ preRestartDefault(reason, Some(m)) - case Some(LoopSuccess(m)) ⇒ preRestartDefault(reason, Some(m)) - case Some(Replayed(m)) ⇒ preRestartDefault(reason, Some(m)) - case mo ⇒ preRestartDefault(reason, None) + case Some(WriteMessageSuccess(m)) ⇒ preRestartDefault(reason, Some(m)) + case Some(LoopMessageSuccess(m)) ⇒ preRestartDefault(reason, Some(m)) + case Some(ReplayedMessage(m)) ⇒ preRestartDefault(reason, Some(m)) + case mo ⇒ preRestartDefault(reason, None) } } } @@ -429,36 +293,44 @@ trait Processor extends Actor with Stash with StashFactory { } private def nextSequenceNr(): Long = { - _sequenceNr += 1L - _sequenceNr + sequenceNr += 1L + sequenceNr } - // ----------------------------------------------------- - // Processor-internal stash - // ----------------------------------------------------- - private val unstashFilterPredicate: Any ⇒ Boolean = { - case _: WriteSuccess ⇒ false - case _: Replayed ⇒ false - case _ ⇒ true + case _: WriteMessageSuccess ⇒ false + case _: ReplayedMessage ⇒ false + case _ ⇒ true } - - private val processorStash = createStash() - - private def currentEnvelope: Envelope = - context.asInstanceOf[ActorCell].currentMessage } /** - * Sent to a [[Processor]] when a journal failed to write a [[Persistent]] message. If + * Sent to a [[Processor]] if a journal fails to write a [[Persistent]] message. If * not handled, an `akka.actor.ActorKilledException` is thrown by that processor. * * @param payload payload of the persistent message. * @param sequenceNr sequence number of the persistent message. * @param cause failure cause. */ +@SerialVersionUID(1L) case class PersistenceFailure(payload: Any, sequenceNr: Long, cause: Throwable) +/** + * Sent to a [[Processor]] if a journal fails to replay messages or fetch that processor's + * highest sequence number. If not handled, a [[RecoveryException]] is thrown by that + * processor. + */ +@SerialVersionUID(1L) +case class RecoveryFailure(cause: Throwable) + +/** + * Thrown by a [[Processor]] if a journal fails to replay messages or fetch that processor's + * highest sequence number. This exception is only thrown if that processor doesn't handle + * [[RecoveryFailure]] messages. + */ +@SerialVersionUID(1L) +case class RecoveryException(message: String, cause: Throwable) extends AkkaException(message, cause) + /** * Java API: an actor that persists (journals) messages of type [[Persistent]]. Messages of other types * are not persisted. @@ -513,9 +385,4 @@ case class PersistenceFailure(payload: Any, sequenceNr: Long, cause: Throwable) * @see [[Recover]] * @see [[PersistentBatch]] */ -abstract class UntypedProcessor extends UntypedActor with Processor { - /** - * Java API. returns the current persistent message or `null` if there is none. 
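(A minimal sketch of handling both failure notifications defined above; everything except PersistenceFailure, RecoveryFailure, Persistent and Processor is illustrative.)

import akka.persistence.{ PersistenceFailure, Persistent, Processor, RecoveryFailure }

class MyProcessor extends Processor {
  def receive = {
    case Persistent(payload, sequenceNr) ⇒
      // process the journaled message
    case PersistenceFailure(payload, sequenceNr, cause) ⇒
      // the journal failed to write the message; without this case the
      // processor would be stopped with an ActorKilledException
    case RecoveryFailure(cause) ⇒
      // the journal failed to replay messages or read the highest sequence
      // number; without this case a RecoveryException would be thrown
  }
}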
- */ - def getCurrentPersistentMessage = currentPersistentMessage.getOrElse(null) -} +abstract class UntypedProcessor extends UntypedActor with Processor diff --git a/akka-persistence/src/main/scala/akka/persistence/Recover.scala b/akka-persistence/src/main/scala/akka/persistence/Recover.scala deleted file mode 100644 index 5e0efa4bb1..0000000000 --- a/akka-persistence/src/main/scala/akka/persistence/Recover.scala +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ - -package akka.persistence - -import akka.AkkaException - -/** - * Instructs a processor to recover itself. Recovery will start from a snapshot if the processor has - * previously saved one or more snapshots and at least one of these snapshots matches the specified - * `fromSnapshot` criteria. Otherwise, recovery will start from scratch by replaying all journaled - * messages. - * - * If recovery starts from a snapshot, the processor is offered that snapshot with a [[SnapshotOffer]] - * message, followed by replayed messages, if any, that are younger than the snapshot, up to the - * specified upper sequence number bound (`toSequenceNr`). - * - * @param fromSnapshot criteria for selecting a saved snapshot from which recovery should start. Default - * is latest (= youngest) snapshot. - * @param toSequenceNr upper sequence number bound (inclusive) for recovery. Default is no upper bound. - */ -@SerialVersionUID(1L) -case class Recover(fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, toSequenceNr: Long = Long.MaxValue) - -object Recover { - /** - * Java API. - * - * @see [[Recover]] - */ - def create() = Recover() - - /** - * Java API. - * - * @see [[Recover]] - */ - def create(toSequenceNr: Long) = - Recover(toSequenceNr = toSequenceNr) - - /** - * Java API. - * - * @see [[Recover]] - */ - def create(fromSnapshot: SnapshotSelectionCriteria) = - Recover(fromSnapshot = fromSnapshot) - - /** - * Java API. - * - * @see [[Recover]] - */ - def create(fromSnapshot: SnapshotSelectionCriteria, toSequenceNr: Long) = - Recover(fromSnapshot, toSequenceNr) -} - -/** - * Sent to a [[Processor]] after failed recovery. If not handled, a - * [[RecoveryFailureException]] is thrown by that processor. - */ -@SerialVersionUID(1L) -case class RecoveryFailure(cause: Throwable) - -/** - * Thrown by a [[Processor]] if a journal failed to replay all requested messages. - */ -@SerialVersionUID(1L) -case class RecoveryFailureException(message: String, cause: Throwable) extends AkkaException(message, cause) - diff --git a/akka-persistence/src/main/scala/akka/persistence/Recovery.scala b/akka-persistence/src/main/scala/akka/persistence/Recovery.scala new file mode 100644 index 0000000000..30764ee1a2 --- /dev/null +++ b/akka-persistence/src/main/scala/akka/persistence/Recovery.scala @@ -0,0 +1,303 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ + +package akka.persistence + +import akka.actor._ +import akka.dispatch.Envelope +import akka.persistence.JournalProtocol._ +import akka.persistence.SnapshotProtocol.LoadSnapshotResult + +/** + * Recovery state machine that loads snapshots and replays messages. + * + * @see [[Processor]] + * @see [[View]] + */ +trait Recovery extends Actor with Snapshotter with Stash with StashFactory { + /** + * INTERNAL API. + * + * Recovery state. 
+   */
+  private[persistence] trait State {
+    def aroundReceive(receive: Receive, message: Any): Unit
+
+    protected def process(receive: Receive, message: Any) =
+      receive.applyOrElse(message, unhandled)
+
+    protected def processPersistent(receive: Receive, persistent: Persistent) =
+      withCurrentPersistent(persistent)(receive.applyOrElse(_, unhandled))
+
+    protected def updateLastSequenceNr(persistent: Persistent): Unit =
+      if (persistent.sequenceNr > _lastSequenceNr) _lastSequenceNr = persistent.sequenceNr
+
+    def updateLastSequenceNr(value: Long): Unit =
+      _lastSequenceNr = value
+
+    protected def withCurrentPersistent(persistent: Persistent)(body: Persistent ⇒ Unit): Unit = try {
+      _currentPersistent = persistent
+      updateLastSequenceNr(persistent)
+      body(persistent)
+    } finally _currentPersistent = null
+
+    protected def recordFailure(cause: Throwable): Unit = {
+      _recoveryFailureCause = cause
+      _recoveryFailureMessage = context.asInstanceOf[ActorCell].currentMessage
+    }
+  }
+
+  /**
+   * INTERNAL API.
+   *
+   * Initial state. Waits for a `Recover` request, then submits a `LoadSnapshot` request to the snapshot
+   * store and changes to `recoveryStarted` state.
+   */
+  private[persistence] val recoveryPending = new State {
+    override def toString: String = "recovery pending"
+
+    def aroundReceive(receive: Receive, message: Any): Unit = message match {
+      case Recover(fromSnap, toSnr, replayMax) ⇒
+        _currentState = recoveryStarted(replayMax)
+        loadSnapshot(snapshotterId, fromSnap, toSnr)
+      case _ ⇒ receiverStash.stash()
+    }
+  }
+
+  /**
+   * INTERNAL API.
+   *
+   * Processes a loaded snapshot, if any. A loaded snapshot is offered with a `SnapshotOffer`
+   * message to the actor's current behavior. Then initiates a message replay, either starting
+   * from the loaded snapshot or from scratch, and switches to `replayStarted` state.
+   *
+   * @param replayMax maximum number of messages to replay.
+   */
+  private[persistence] def recoveryStarted(replayMax: Long) = new State {
+    override def toString: String = s"recovery started (replayMax = [${replayMax}])"
+
+    def aroundReceive(receive: Receive, message: Any) = message match {
+      case r: Recover ⇒ // ignore
+      case LoadSnapshotResult(sso, toSnr) ⇒
+        sso.foreach {
+          case SelectedSnapshot(metadata, snapshot) ⇒
+            updateLastSequenceNr(metadata.sequenceNr)
+            process(receive, SnapshotOffer(metadata, snapshot))
+        }
+        _currentState = replayStarted(await = true)
+        journal ! ReplayMessages(lastSequenceNr + 1L, toSnr, replayMax, processorId, self)
+      case other ⇒ receiverStash.stash()
+    }
+  }
+
+  /**
+   * INTERNAL API.
+   *
+   * Processes replayed messages, if any. The actor's current behavior is invoked with the replayed
+   * [[Persistent]] messages. If processing of a replayed message fails, the exception is caught and
+   * stored for being thrown later and the state is changed to `replayFailed`. If replay succeeds, the
+   * `onReplaySuccess` method is called, otherwise `onReplayFailure`.
+   *
+   * @param await if `true`, processing of further messages will be delayed until replay completes;
+   *              otherwise, the actor's behavior is invoked immediately with these messages.
+ */ + private[persistence] def replayStarted(await: Boolean) = new State { + override def toString: String = s"replay started (await = [${await}])" + + def aroundReceive(receive: Receive, message: Any) = message match { + case r: Recover ⇒ // ignore + case ReplayedMessage(p) ⇒ try { processPersistent(receive, p) } catch { + case t: Throwable ⇒ + _currentState = replayFailed // delay throwing exception to prepareRestart + recordFailure(t) + } + case ReplayMessagesSuccess ⇒ onReplaySuccess(receive, await) + case ReplayMessagesFailure(cause) ⇒ onReplayFailure(receive, await, cause) + case other ⇒ + if (await) receiverStash.stash() else process(receive, other) + } + } + + /** + * INTERNAL API. + * + * Consumes remaining replayed messages and then changes to `prepareRestart`. The + * message that caused the exception during replay, is re-added to the mailbox and + * re-received in `prepareRestart`. + */ + private[persistence] val replayFailed = new State { + override def toString: String = "replay failed" + + def aroundReceive(receive: Receive, message: Any) = message match { + case ReplayMessagesFailure(_) ⇒ + replayCompleted() + // journal couldn't tell the maximum stored sequence number, hence the next + // replay must be a full replay (up to the highest stored sequence number) + updateLastSequenceNr(Long.MaxValue) + case ReplayMessagesSuccess ⇒ replayCompleted() + case ReplayedMessage(p) ⇒ updateLastSequenceNr(p) + case r: Recover ⇒ // ignore + case _ ⇒ receiverStash.stash() + } + + def replayCompleted(): Unit = { + _currentState = prepareRestart + mailbox.enqueueFirst(self, _recoveryFailureMessage) + } + } + + /** + * INTERNAL API. + * + * Re-receives the replayed message that caused an exception and re-throws that exception. + */ + private[persistence] val prepareRestart = new State { + override def toString: String = "prepare restart" + + def aroundReceive(receive: Receive, message: Any) = message match { + case ReplayedMessage(_) ⇒ throw _recoveryFailureCause + case _ ⇒ // ignore + } + } + + private var _recoveryFailureCause: Throwable = _ + private var _recoveryFailureMessage: Envelope = _ + + private var _lastSequenceNr: Long = 0L + private var _currentPersistent: Persistent = _ + + /** + * Id of the processor for which messages should be replayed. + */ + def processorId: String + + /** + * Returns the current persistent message if there is any. + */ + implicit def currentPersistentMessage: Option[Persistent] = Option(_currentPersistent) + + /** + * Java API: returns the current persistent message or `null` if there is none. + */ + def getCurrentPersistentMessage = currentPersistentMessage.getOrElse(null) + + /** + * Highest received sequence number so far or `0L` if this actor hasn't received a persistent + * message yet. Usually equal to the sequence number of `currentPersistentMessage` (unless a + * receiver implementation is about to re-order persistent messages using `stash()` and `unstash()`). + */ + def lastSequenceNr: Long = _lastSequenceNr + + /** + * Returns `lastSequenceNr`. + */ + def snapshotSequenceNr: Long = lastSequenceNr + + /** + * INTERNAL API. + */ + private[persistence] var _currentState: State = recoveryPending + + /** + * INTERNAL API. + * + * Called whenever a message replay succeeds. + * + * @param receive the actor's current behavior. + * @param awaitReplay `awaitReplay` value of the calling `replayStarted` state. + */ + private[persistence] def onReplaySuccess(receive: Receive, awaitReplay: Boolean): Unit + + /** + * INTERNAL API. 
+ * + * Called whenever a message replay fails. + * + * @param receive the actor's current behavior. + * @param awaitReplay `awaitReplay` value of the calling `replayStarted` state. + * @param cause failure cause. + */ + private[persistence] def onReplayFailure(receive: Receive, awaitReplay: Boolean, cause: Throwable): Unit + + /** + * INTERNAL API. + */ + private[persistence] val extension = Persistence(context.system) + + /** + * INTERNAL API. + */ + private[persistence] lazy val journal = extension.journalFor(processorId) + + /** + * INTERNAL API. + */ + private[persistence] val receiverStash = createStash() + + /** + * INTERNAL API. + */ + override protected[akka] def aroundReceive(receive: Receive, message: Any): Unit = { + _currentState.aroundReceive(receive, message) + } +} + +/** + * Instructs a processor to recover itself. Recovery will start from a snapshot if the processor has + * previously saved one or more snapshots and at least one of these snapshots matches the specified + * `fromSnapshot` criteria. Otherwise, recovery will start from scratch by replaying all journaled + * messages. + * + * If recovery starts from a snapshot, the processor is offered that snapshot with a [[SnapshotOffer]] + * message, followed by replayed messages, if any, that are younger than the snapshot, up to the + * specified upper sequence number bound (`toSequenceNr`). + * + * @param fromSnapshot criteria for selecting a saved snapshot from which recovery should start. Default + * is latest (= youngest) snapshot. + * @param toSequenceNr upper sequence number bound (inclusive) for recovery. Default is no upper bound. + * @param replayMax maximum number of messages to replay. Default is no limit. + */ +@SerialVersionUID(1L) +case class Recover(fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, toSequenceNr: Long = Long.MaxValue, replayMax: Long = Long.MaxValue) + +object Recover { + /** + * Java API. + * + * @see [[Recover]] + */ + def create() = Recover() + + /** + * Java API. + * + * @see [[Recover]] + */ + def create(toSequenceNr: Long) = + Recover(toSequenceNr = toSequenceNr) + + /** + * Java API. + * + * @see [[Recover]] + */ + def create(fromSnapshot: SnapshotSelectionCriteria) = + Recover(fromSnapshot = fromSnapshot) + + /** + * Java API. + * + * @see [[Recover]] + */ + def create(fromSnapshot: SnapshotSelectionCriteria, toSequenceNr: Long) = + Recover(fromSnapshot, toSequenceNr) + + /** + * Java API. + * + * @see [[Recover]] + */ + def create(fromSnapshot: SnapshotSelectionCriteria, toSequenceNr: Long, replayMax: Long) = + Recover(fromSnapshot, toSequenceNr, replayMax) +} diff --git a/akka-persistence/src/main/scala/akka/persistence/Snapshot.scala b/akka-persistence/src/main/scala/akka/persistence/Snapshot.scala index adcef8426e..57737a638c 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Snapshot.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Snapshot.scala @@ -101,7 +101,7 @@ case class SelectedSnapshot(metadata: SnapshotMetadata, snapshot: Any) object SelectedSnapshot { /** - * Plugin Java API. + * Java API, Plugin API. 
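(Usage sketch for the Recover request defined above; the processor reference and the bounds are illustrative.)

import akka.persistence.{ Recover, SnapshotSelectionCriteria }

// recover from the latest matching snapshot, then replay at most 100
// journaled messages with sequence numbers up to 5000
processor ! Recover(
  fromSnapshot = SnapshotSelectionCriteria.Latest,
  toSequenceNr = 5000L,
  replayMax    = 100L)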
    */
   def create(metadata: SnapshotMetadata, snapshot: Any): SelectedSnapshot =
     SelectedSnapshot(metadata, snapshot)
diff --git a/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala b/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala
new file mode 100644
index 0000000000..e5642e6da5
--- /dev/null
+++ b/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala
@@ -0,0 +1,51 @@
+/**
+ * Copyright (C) 2009-2013 Typesafe Inc.
+ */
+
+package akka.persistence
+
+import akka.actor._
+import akka.persistence.SnapshotProtocol._
+
+/**
+ * Snapshot API on top of the internal snapshot protocol.
+ */
+trait Snapshotter extends Actor {
+  private lazy val snapshotStore = Persistence(context.system).snapshotStoreFor(snapshotterId)
+
+  /**
+   * Snapshotter id.
+   */
+  def snapshotterId: String
+
+  /**
+   * Sequence number to use when taking a snapshot.
+   */
+  def snapshotSequenceNr: Long
+
+  def loadSnapshot(processorId: String, criteria: SnapshotSelectionCriteria, toSequenceNr: Long) =
+    snapshotStore ! LoadSnapshot(processorId, criteria, toSequenceNr)
+
+  /**
+   * Saves a `snapshot` of this snapshotter's state. If saving succeeds, this snapshotter will receive a
+   * [[SaveSnapshotSuccess]] message, otherwise a [[SaveSnapshotFailure]] message.
+   */
+  def saveSnapshot(snapshot: Any): Unit = {
+    snapshotStore ! SaveSnapshot(SnapshotMetadata(snapshotterId, snapshotSequenceNr), snapshot)
+  }
+
+  /**
+   * Deletes a snapshot identified by `sequenceNr` and `timestamp`.
+   */
+  def deleteSnapshot(sequenceNr: Long, timestamp: Long): Unit = {
+    snapshotStore ! DeleteSnapshot(SnapshotMetadata(snapshotterId, sequenceNr, timestamp))
+  }
+
+  /**
+   * Deletes all snapshots matching `criteria`.
+   */
+  def deleteSnapshots(criteria: SnapshotSelectionCriteria): Unit = {
+    snapshotStore ! DeleteSnapshots(snapshotterId, criteria)
+  }
+
+}
diff --git a/akka-persistence/src/main/scala/akka/persistence/View.scala b/akka-persistence/src/main/scala/akka/persistence/View.scala
new file mode 100644
index 0000000000..00d544d6e3
--- /dev/null
+++ b/akka-persistence/src/main/scala/akka/persistence/View.scala
@@ -0,0 +1,200 @@
+/**
+ * Copyright (C) 2009-2013 Typesafe Inc.
+ */
+
+package akka.persistence
+
+import scala.concurrent.duration._
+
+import akka.actor._
+import akka.persistence.JournalProtocol._
+
+/**
+ * Instructs a [[View]] to update itself. This will run a single incremental message replay with all
+ * messages from the corresponding processor's journal that have not yet been consumed by the view.
+ * To update a view with messages that have been written after handling this request, another `Update`
+ * request must be sent to the view.
+ *
+ * @param await if `true`, processing of further messages sent to the view will be delayed until the
+ *              incremental message replay, triggered by this update request, completes. If `false`,
+ *              any message sent to the view may interleave with the replayed [[Persistent]] message
+ *              stream.
+ * @param replayMax maximum number of messages to replay when handling this update request. Defaults
+ *                  to `Long.MaxValue` (i.e. no limit).
+ */
+@SerialVersionUID(1L)
+case class Update(await: Boolean = false, replayMax: Long = Long.MaxValue)
+
+object Update {
+  /**
+   * Java API.
+   */
+  def create() =
+    Update()
+
+  /**
+   * Java API.
+   */
+  def create(await: Boolean) =
+    Update(await)
+
+  /**
+   * Java API.
+   */
+  def create(await: Boolean, replayMax: Long) =
+    Update(await, replayMax)
+}
+
+/**
+ * A view replicates the persistent message stream of a processor. Implementation classes receive the
+ * message stream as [[Persistent]] messages. These messages can be processed to update internal state
+ * in order to maintain an (eventually consistent) view of the state of the corresponding processor. A
+ * view can also run on a different node, provided that a replicated journal is used. Implementation
+ * classes reference a processor by implementing `processorId`.
+ *
+ * Views can also store snapshots of internal state by calling [[saveSnapshot]]. The snapshots of a view
+ * are independent of those of the referenced processor. During recovery, a saved snapshot is offered
+ * to the view with a [[SnapshotOffer]] message, followed by replayed messages, if any, that are younger
+ * than the snapshot. Default is to offer the latest saved snapshot.
+ *
+ * By default, a view automatically updates itself with an interval returned by `autoUpdateInterval`.
+ * This method can be overridden by implementation classes to define a view instance-specific update
+ * interval. The default update interval for all views of an actor system can be configured with the
+ * `akka.persistence.view.auto-update-interval` configuration key. Applications may trigger additional
+ * view updates by sending the view [[Update]] requests. See also methods
+ *
+ * - [[autoUpdate]] for turning automated updates on or off and
+ * - [[autoUpdateReplayMax]] for limiting the number of replayed messages per view update cycle
+ *   (also applied on initial view recovery)
+ *
+ * Views can also use channels to communicate with destinations in the same way as processors do.
+ */
+trait View extends Actor with Recovery {
+  import context.dispatcher
+
+  /**
+   * INTERNAL API.
+   *
+   * Extends the `replayStarted` state of [[Recovery]] with logic to handle [[Update]] requests
+   * sent by users.
+   */
+  private[persistence] override def replayStarted(await: Boolean) = new State {
+    private var delegateAwaiting = await
+    private var delegate = View.super.replayStarted(await)
+
+    override def toString: String = delegate.toString
+
+    override def aroundReceive(receive: Receive, message: Any) = message match {
+      case Update(false, _) ⇒ // ignore
+      case u @ Update(true, _) if !delegateAwaiting ⇒
+        delegateAwaiting = true
+        delegate = View.super.replayStarted(await = true)
+        delegate.aroundReceive(receive, u)
+      case other ⇒
+        delegate.aroundReceive(receive, other)
+    }
+  }
+
+  /**
+   * When receiving an [[Update]] request, switches to `replayStarted` state and triggers
+   * an incremental message replay. Invokes the actor's current behavior for any other
+   * received message.
+   */
+  private val idle: State = new State {
+    override def toString: String = "idle"
+
+    def aroundReceive(receive: Receive, message: Any): Unit = message match {
+      case r: Recover ⇒ // ignore
+      case Update(awaitUpdate, replayMax) ⇒
+        _currentState = replayStarted(await = awaitUpdate)
+        journal ! ReplayMessages(lastSequenceNr + 1L, Long.MaxValue, replayMax, processorId, self)
+      case other ⇒ process(receive, other)
+    }
+  }
+
+  /**
+   * INTERNAL API.
+   */
+  private[persistence] def onReplaySuccess(receive: Receive, await: Boolean): Unit =
+    onReplayComplete(await)
+
+  /**
+   * INTERNAL API.
+   */
+  private[persistence] def onReplayFailure(receive: Receive, await: Boolean, cause: Throwable): Unit =
+    onReplayComplete(await)
+
+  /**
+   * Switches to `idle` state and schedules the next update if `autoUpdate` returns `true`.
+   */
+  private def onReplayComplete(await: Boolean): Unit = {
+    _currentState = idle
+    if (autoUpdate) schedule = Some(context.system.scheduler.scheduleOnce(autoUpdateInterval, self, Update(await = false)))
+    if (await) receiverStash.unstashAll()
+  }
+
+  private val _viewId = extension.processorId(self)
+  private val viewSettings = extension.settings.view
+
+  private var schedule: Option[Cancellable] = None
+
+  /**
+   * View id. Defaults to this view's path and can be overridden.
+   */
+  def viewId: String = _viewId
+
+  /**
+   * Returns `viewId`.
+   */
+  def snapshotterId: String = viewId
+
+  /**
+   * If `true`, this view automatically updates itself with an interval specified by `autoUpdateInterval`.
+   * If `false`, applications must explicitly update this view by sending [[Update]] requests. The default
+   * value can be configured with the `akka.persistence.view.auto-update` configuration key. This method
+   * can be overridden by implementation classes to return non-default values.
+   */
+  def autoUpdate: Boolean =
+    viewSettings.autoUpdate
+
+  /**
+   * The interval for automated updates. The default value can be configured with the
+   * `akka.persistence.view.auto-update-interval` configuration key. This method can be
+   * overridden by implementation classes to return non-default values.
+   */
+  def autoUpdateInterval: FiniteDuration =
+    viewSettings.autoUpdateInterval
+
+  /**
+   * The maximum number of messages to replay per update. The default value can be configured with the
+   * `akka.persistence.view.auto-update-replay-max` configuration key. This method can be overridden by
+   * implementation classes to return non-default values.
+   */
+  def autoUpdateReplayMax: Long =
+    viewSettings.autoUpdateReplayMax
+
+  /**
+   * Triggers an initial recovery, starting from a snapshot, if any, and replaying at most `autoUpdateReplayMax`
+   * messages (following that snapshot).
+   */
+  override def preStart(): Unit = {
+    super.preStart()
+    self ! Recover(replayMax = autoUpdateReplayMax)
+  }
+
+  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
+    try receiverStash.unstashAll() finally super.preRestart(reason, message)
+  }
+
+  override def postStop(): Unit = {
+    schedule.foreach(_.cancel())
+    super.postStop()
+  }
+}
+
+/**
+ * Java API.
+ *
+ * @see [[View]]
+ */
+abstract class UntypedView extends UntypedActor with View
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncReplay.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala
similarity index 60%
rename from akka-persistence/src/main/scala/akka/persistence/journal/AsyncReplay.scala
rename to akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala
index 43488be6a2..e597953398 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncReplay.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala
@@ -9,17 +9,16 @@
 import scala.concurrent.Future

 import akka.persistence.PersistentRepr

 /**
- * Asynchronous message replay interface.
+ * Asynchronous message replay and sequence number recovery interface.
  */
-trait AsyncReplay {
+trait AsyncRecovery {
   //#journal-plugin-api
   /**
    * Plugin API: asynchronously replays persistent messages. Implementations replay
    * a message by calling `replayCallback`. The returned future must be completed
-   * when all messages (matching the sequence number bounds) have been replayed.
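(A minimal view implementation and update request, tying together the View and Update definitions above; the class name and processor id are illustrative.)

import akka.actor.{ ActorSystem, Props }
import akka.persistence.{ Persistent, Update, View }

class MyView extends View {
  def processorId: String = "my-processor"

  def receive = {
    case Persistent(payload, sequenceNr) ⇒
      // update internal (eventually consistent) state
  }
}

val system = ActorSystem("example")
val view = system.actorOf(Props[MyView])

// trigger an additional incremental replay of at most 100 messages,
// stashing other messages until the replay completes
view ! Update(await = true, replayMax = 100L)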
The - * future `Long` value must be the highest stored sequence number in the journal - * for the specified processor. The future must be completed with a failure if any - * of the persistent messages could not be replayed. + * when all messages (matching the sequence number bounds) have been replayed. + * The future must be completed with a failure if any of the persistent messages + * could not be replayed. * * The `replayCallback` must also be called with messages that have been marked * as deleted. In this case a replayed message's `deleted` method must return @@ -31,12 +30,23 @@ trait AsyncReplay { * @param processorId processor id. * @param fromSequenceNr sequence number where replay should start (inclusive). * @param toSequenceNr sequence number where replay should end (inclusive). + * @param max maximum number of messages to be replayed. * @param replayCallback called to replay a single message. Can be called from any * thread. * * @see [[AsyncWriteJournal]] * @see [[SyncWriteJournal]] */ - def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Long] + def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] + + /** + * Plugin API: asynchronously reads the highest stored sequence number for the + * given `processorId`. + * + * @param processorId processor id. + * @param fromSequenceNr hint where to start searching for the highest sequence + * number. + */ + def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] //#journal-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala index 13e03bbd93..f15632d689 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala @@ -12,69 +12,73 @@ import scala.util._ import akka.actor._ import akka.pattern.pipe import akka.persistence._ -import akka.persistence.JournalProtocol._ /** * Abstract journal, optimized for asynchronous, non-blocking writes. */ -trait AsyncWriteJournal extends Actor with AsyncReplay { +trait AsyncWriteJournal extends Actor with AsyncRecovery { + import JournalProtocol._ import AsyncWriteJournal._ import context.dispatcher private val extension = Persistence(context.system) + private val publish = extension.settings.internal.publishPluginCommands private val resequencer = context.actorOf(Props[Resequencer]) private var resequencerCounter = 1L def receive = { - case WriteBatch(persistentBatch, processor) ⇒ + case WriteMessages(persistentBatch, processor) ⇒ val cctr = resequencerCounter def resequence(f: PersistentRepr ⇒ Any) = persistentBatch.zipWithIndex.foreach { case (p, i) ⇒ resequencer ! Desequenced(f(p), cctr + i + 1, processor, p.sender) } - writeAsync(persistentBatch.map(_.prepareWrite())) onComplete { + asyncWriteMessages(persistentBatch.map(_.prepareWrite())) onComplete { case Success(_) ⇒ - resequencer ! Desequenced(WriteBatchSuccess, cctr, processor, self) - resequence(WriteSuccess(_)) + resequencer ! Desequenced(WriteMessagesSuccess, cctr, processor, self) + resequence(WriteMessageSuccess(_)) case Failure(e) ⇒ - resequencer ! Desequenced(WriteBatchFailure(e), cctr, processor, self) - resequence(WriteFailure(_, e)) + resequencer ! 
Desequenced(WriteMessagesFailure(e), cctr, processor, self) + resequence(WriteMessageFailure(_, e)) } resequencerCounter += persistentBatch.length + 1 - case Replay(fromSequenceNr, toSequenceNr, processorId, processor) ⇒ + case ReplayMessages(fromSequenceNr, toSequenceNr, max, processorId, processor, replayDeleted) ⇒ // Send replayed messages and replay result to processor directly. No need // to resequence replayed messages relative to written and looped messages. - replayAsync(processorId, fromSequenceNr, toSequenceNr) { p ⇒ - if (!p.deleted) processor.tell(Replayed(p), p.sender) + asyncReplayMessages(processorId, fromSequenceNr, toSequenceNr, max) { p ⇒ + if (!p.deleted || replayDeleted) processor.tell(ReplayedMessage(p), p.sender) } map { - maxSnr ⇒ ReplaySuccess(maxSnr) + case _ ⇒ ReplayMessagesSuccess } recover { - case e ⇒ ReplayFailure(e) + case e ⇒ ReplayMessagesFailure(e) } pipeTo (processor) - case c @ Confirm(processorId, messageSequenceNr, channelId, wrapperSequenceNr, channelEndpoint) ⇒ - val op = if (wrapperSequenceNr == 0L) { - // A wrapperSequenceNr == 0L means that the corresponding message was delivered by a - // transient channel. We can now write a delivery confirmation for this message. - confirmAsync(processorId, messageSequenceNr, channelId) - } else { - // A wrapperSequenceNr != 0L means that the corresponding message was delivered by a - // persistent channel. We can now safely delete the wrapper message (that contains the - // delivered message). - deleteAsync(channelId, wrapperSequenceNr, wrapperSequenceNr, true) + case ReadHighestSequenceNr(fromSequenceNr, processorId, processor) ⇒ + // Send read highest sequence number to processor directly. No need + // to resequence the result relative to written and looped messages. + asyncReadHighestSequenceNr(processorId, fromSequenceNr).map { + highest ⇒ ReadHighestSequenceNrSuccess(highest) + } recover { + case e ⇒ ReadHighestSequenceNrFailure(e) + } pipeTo (processor) + case c @ WriteConfirmations(confirmationsBatch, requestor) ⇒ + asyncWriteConfirmations(confirmationsBatch) onComplete { + case Success(_) ⇒ requestor ! WriteConfirmationsSuccess(confirmationsBatch) + case Failure(e) ⇒ requestor ! WriteConfirmationsFailure(e) } - op onComplete { + case d @ DeleteMessages(messageIds, permanent, requestorOption) ⇒ + asyncDeleteMessages(messageIds, permanent) onComplete { case Success(_) ⇒ - if (extension.publishPluginCommands) context.system.eventStream.publish(c) - if (channelEndpoint != null) channelEndpoint ! c - case Failure(e) ⇒ // TODO: publish failure to event stream + requestorOption.foreach(_ ! DeleteMessagesSuccess(messageIds)) + if (publish) context.system.eventStream.publish(d) + case Failure(e) ⇒ } - case d @ Delete(processorId, fromSequenceNr, toSequenceNr, permanent) ⇒ - deleteAsync(processorId, fromSequenceNr, toSequenceNr, permanent) onComplete { - case Success(_) ⇒ if (extension.publishPluginCommands) context.system.eventStream.publish(d) - case Failure(e) ⇒ // TODO: publish failure to event stream + case d @ DeleteMessagesTo(processorId, toSequenceNr, permanent) ⇒ + asyncDeleteMessagesTo(processorId, toSequenceNr, permanent) onComplete { + case Success(_) ⇒ if (publish) context.system.eventStream.publish(d) + case Failure(e) ⇒ } - case Loop(message, processor) ⇒ - resequencer ! Desequenced(LoopSuccess(message), resequencerCounter, processor, sender) + case LoopMessage(message, processor) ⇒ + resequencer ! 
Desequenced(LoopMessageSuccess(message), resequencerCounter, processor, sender) resequencerCounter += 1 } @@ -84,22 +88,26 @@ trait AsyncWriteJournal extends Actor with AsyncReplay { * The batch write must be atomic i.e. either all persistent messages in the batch * are written or none. */ - def writeAsync(persistentBatch: immutable.Seq[PersistentRepr]): Future[Unit] + def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]): Future[Unit] /** - * Plugin API: asynchronously deletes all persistent messages within the range from - * `fromSequenceNr` to `toSequenceNr` (both inclusive). If `permanent` is set to - * `false`, the persistent messages are marked as deleted, otherwise they are - * permanently deleted. - * - * @see [[AsyncReplay]] + * Plugin API: asynchronously writes a batch of delivery confirmations to the journal. */ - def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit] + def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Future[Unit] /** - * Plugin API: asynchronously writes a delivery confirmation to the journal. + * Plugin API: asynchronously deletes messages identified by `messageIds` from the + * journal. If `permanent` is set to `false`, the persistent messages are marked as + * deleted, otherwise they are permanently deleted. */ - def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] + def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Future[Unit] + + /** + * Plugin API: asynchronously deletes all persistent messages up to `toSequenceNr` + * (inclusive). If `permanent` is set to `false`, the persistent messages are marked + * as deleted, otherwise they are permanently deleted. + */ + def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] //#journal-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala index 880015d2cf..9a1368b73a 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala @@ -37,21 +37,27 @@ private[persistence] trait AsyncWriteProxy extends AsyncWriteJournal with Stash implicit def timeout: Timeout - def writeAsync(persistentBatch: immutable.Seq[PersistentRepr]): Future[Unit] = - (store ? WriteBatch(persistentBatch)).mapTo[Unit] + def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]): Future[Unit] = + (store ? WriteMessages(messages)).mapTo[Unit] - def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit] = - (store ? Delete(processorId, fromSequenceNr, toSequenceNr, permanent)).mapTo[Unit] + def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Future[Unit] = + (store ? WriteConfirmations(confirmations)).mapTo[Unit] - def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] = - (store ? Confirm(processorId, sequenceNr, channelId)).mapTo[Unit] + def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Future[Unit] = + (store ? 
DeleteMessages(messageIds, permanent)).mapTo[Unit] - def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Long] = { - val replayCompletionPromise = Promise[Long] + def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] = + (store ? DeleteMessagesTo(processorId, toSequenceNr, permanent)).mapTo[Unit] + + def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Unit] = { + val replayCompletionPromise = Promise[Unit] val mediator = context.actorOf(Props(classOf[ReplayMediator], replayCallback, replayCompletionPromise, timeout.duration).withDeploy(Deploy.local)) - store.tell(Replay(processorId, fromSequenceNr, toSequenceNr), mediator) + store.tell(ReplayMessages(processorId, fromSequenceNr, toSequenceNr, max), mediator) replayCompletionPromise.future } + + def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = + (store ? ReadHighestSequenceNr(processorId, fromSequenceNr)).mapTo[Long] } /** @@ -66,22 +72,28 @@ private[persistence] object AsyncWriteProxy { */ private[persistence] object AsyncWriteTarget { @SerialVersionUID(1L) - case class WriteBatch(pb: immutable.Seq[PersistentRepr]) + case class WriteMessages(messages: immutable.Seq[PersistentRepr]) @SerialVersionUID(1L) - case class Delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) + case class WriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) @SerialVersionUID(1L) - case class Confirm(processorId: String, sequenceNr: Long, channelId: String) + case class DeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) @SerialVersionUID(1L) - case class Replay(processorId: String, fromSequenceNr: Long, toSequenceNr: Long) + case class DeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) @SerialVersionUID(1L) - case class ReplaySuccess(maxSequenceNr: Long) + case class ReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long) + + @SerialVersionUID(1L) + case object ReplaySuccess @SerialVersionUID(1L) case class ReplayFailure(cause: Throwable) + + @SerialVersionUID(1L) + case class ReadHighestSequenceNr(processorId: String, fromSequenceNr: Long) } /** @@ -90,15 +102,15 @@ private[persistence] object AsyncWriteTarget { @SerialVersionUID(1L) class AsyncReplayTimeoutException(msg: String) extends AkkaException(msg) -private class ReplayMediator(replayCallback: PersistentRepr ⇒ Unit, replayCompletionPromise: Promise[Long], replayTimeout: Duration) extends Actor { +private class ReplayMediator(replayCallback: PersistentRepr ⇒ Unit, replayCompletionPromise: Promise[Unit], replayTimeout: Duration) extends Actor { import AsyncWriteTarget._ context.setReceiveTimeout(replayTimeout) def receive = { case p: PersistentRepr ⇒ replayCallback(p) - case ReplaySuccess(maxSnr) ⇒ - replayCompletionPromise.success(maxSnr) + case ReplaySuccess ⇒ + replayCompletionPromise.success(()) context.stop(self) case ReplayFailure(cause) ⇒ replayCompletionPromise.failure(cause) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala index 1145957918..4f330314b5 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala +++ 
b/akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala @@ -15,49 +15,58 @@ import akka.persistence._ /** * Abstract journal, optimized for synchronous writes. */ -trait SyncWriteJournal extends Actor with AsyncReplay { +trait SyncWriteJournal extends Actor with AsyncRecovery { import JournalProtocol._ import context.dispatcher private val extension = Persistence(context.system) + private val publish = extension.settings.internal.publishPluginCommands final def receive = { - case WriteBatch(persistentBatch, processor) ⇒ - Try(write(persistentBatch.map(_.prepareWrite()))) match { + case WriteMessages(persistentBatch, processor) ⇒ + Try(writeMessages(persistentBatch.map(_.prepareWrite()))) match { case Success(_) ⇒ - processor ! WriteBatchSuccess - persistentBatch.foreach(p ⇒ processor.tell(WriteSuccess(p), p.sender)) + processor ! WriteMessagesSuccess + persistentBatch.foreach(p ⇒ processor.tell(WriteMessageSuccess(p), p.sender)) case Failure(e) ⇒ - processor ! WriteBatchFailure(e) - persistentBatch.foreach(p ⇒ processor tell (WriteFailure(p, e), p.sender)) + processor ! WriteMessagesFailure(e) + persistentBatch.foreach(p ⇒ processor tell (WriteMessageFailure(p, e), p.sender)) throw e } - case Replay(fromSequenceNr, toSequenceNr, processorId, processor) ⇒ - replayAsync(processorId, fromSequenceNr, toSequenceNr) { p ⇒ - if (!p.deleted) processor.tell(Replayed(p), p.sender) + case ReplayMessages(fromSequenceNr, toSequenceNr, max, processorId, processor, replayDeleted) ⇒ + asyncReplayMessages(processorId, fromSequenceNr, toSequenceNr, max) { p ⇒ + if (!p.deleted || replayDeleted) processor.tell(ReplayedMessage(p), p.sender) } map { - maxSnr ⇒ ReplaySuccess(maxSnr) + case _ ⇒ ReplayMessagesSuccess } recover { - case e ⇒ ReplayFailure(e) + case e ⇒ ReplayMessagesFailure(e) } pipeTo (processor) - case c @ Confirm(processorId, messageSequenceNr, channelId, wrapperSequenceNr, channelEndpoint) ⇒ - if (wrapperSequenceNr == 0L) { - // A wrapperSequenceNr == 0L means that the corresponding message was delivered by a - // transient channel. We can now write a delivery confirmation for this message. - confirm(processorId, messageSequenceNr, channelId) - } else { - // A wrapperSequenceNr != 0L means that the corresponding message was delivered by a - // persistent channel. We can now safely delete the wrapper message (that contains the - // delivered message). - delete(channelId, wrapperSequenceNr, wrapperSequenceNr, true) + case ReadHighestSequenceNr(fromSequenceNr, processorId, processor) ⇒ + asyncReadHighestSequenceNr(processorId, fromSequenceNr).map { + highest ⇒ ReadHighestSequenceNrSuccess(highest) + } recover { + case e ⇒ ReadHighestSequenceNrFailure(e) + } pipeTo (processor) + case WriteConfirmations(confirmationsBatch, requestor) ⇒ + Try(writeConfirmations(confirmationsBatch)) match { + case Success(_) ⇒ requestor ! WriteConfirmationsSuccess(confirmationsBatch) + case Failure(e) ⇒ requestor ! WriteConfirmationsFailure(e) } - if (channelEndpoint != null) channelEndpoint ! 
c - if (extension.publishPluginCommands) context.system.eventStream.publish(c) - case d @ Delete(processorId, fromSequenceNr, toSequenceNr, permanent) ⇒ - delete(processorId, fromSequenceNr, toSequenceNr, permanent) - if (extension.publishPluginCommands) context.system.eventStream.publish(d) - case Loop(message, processor) ⇒ - processor forward LoopSuccess(message) + case d @ DeleteMessages(messageIds, permanent, requestorOption) ⇒ + Try(deleteMessages(messageIds, permanent)) match { + case Success(_) ⇒ + requestorOption.foreach(_ ! DeleteMessagesSuccess(messageIds)) + if (publish) context.system.eventStream.publish(d) + case Failure(e) ⇒ + requestorOption.foreach(_ ! DeleteMessagesFailure(e)) + } + case d @ DeleteMessagesTo(processorId, toSequenceNr, permanent) ⇒ + Try(deleteMessagesTo(processorId, toSequenceNr, permanent)) match { + case Success(_) ⇒ if (publish) context.system.eventStream.publish(d) + case Failure(e) ⇒ + } + case LoopMessage(message, processor) ⇒ + processor forward LoopMessageSuccess(message) } //#journal-plugin-api @@ -66,21 +75,25 @@ trait SyncWriteJournal extends Actor with AsyncReplay { * The batch write must be atomic i.e. either all persistent messages in the batch * are written or none. */ - def write(persistentBatch: immutable.Seq[PersistentRepr]): Unit + def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit /** - * Plugin API: synchronously deletes all persistent messages within the range from - * `fromSequenceNr` to `toSequenceNr` (both inclusive). If `permanent` is set to - * `false`, the persistent messages are marked as deleted, otherwise they are - * permanently deleted. - * - * @see [[AsyncReplay]] + * Plugin API: synchronously writes a batch of delivery confirmations to the journal. */ - def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Unit + def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit /** - * Plugin API: synchronously writes a delivery confirmation to the journal. + * Plugin API: synchronously deletes messages identified by `messageIds` from the + * journal. If `permanent` is set to `false`, the persistent messages are marked as + * deleted, otherwise they are permanently deleted. */ - def confirm(processorId: String, sequenceNr: Long, channelId: String): Unit + def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit + + /** + * Plugin API: synchronously deletes all persistent messages up to `toSequenceNr` + * (inclusive). If `permanent` is set to `false`, the persistent messages are marked + * as deleted, otherwise they are permanently deleted. + */ + def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit //#journal-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala index 6037702165..3d886bb3cd 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala @@ -34,7 +34,7 @@ private[persistence] class InmemJournal extends AsyncWriteProxy { * INTERNAL API. 
*/ private[persistence] trait InmemMessages { - // processor id => persistent message + // processor id -> persistent message var messages = Map.empty[String, Vector[PersistentRepr]] def add(p: PersistentRepr) = messages = messages + (messages.get(p.processorId) match { @@ -52,18 +52,21 @@ private[persistence] trait InmemMessages { case None ⇒ messages } - def read(pid: String, fromSnr: Long, toSnr: Long): immutable.Seq[PersistentRepr] = messages.get(pid) match { - case Some(ms) ⇒ ms.filter(m ⇒ m.sequenceNr >= fromSnr && m.sequenceNr <= toSnr) + def read(pid: String, fromSnr: Long, toSnr: Long, max: Long): immutable.Seq[PersistentRepr] = messages.get(pid) match { + case Some(ms) ⇒ ms.filter(m ⇒ m.sequenceNr >= fromSnr && m.sequenceNr <= toSnr).take(safeLongToInt(max)) case None ⇒ Nil } - def maxSequenceNr(pid: String): Long = { + def highestSequenceNr(pid: String): Long = { val snro = for { ms ← messages.get(pid) m ← ms.lastOption } yield m.sequenceNr snro.getOrElse(0L) } + + private def safeLongToInt(l: Long): Int = + if (Int.MaxValue < l) Int.MaxValue else l.toInt } /** @@ -73,16 +76,22 @@ private[persistence] class InmemStore extends Actor with InmemMessages { import AsyncWriteTarget._ def receive = { - case WriteBatch(pb) ⇒ - sender ! pb.foreach(add) - case Delete(pid, fsnr, tsnr, false) ⇒ - sender ! (fsnr to tsnr foreach { snr ⇒ update(pid, snr)(_.update(deleted = true)) }) - case Delete(pid, fsnr, tsnr, true) ⇒ - sender ! (fsnr to tsnr foreach { snr ⇒ delete(pid, snr) }) - case Confirm(pid, snr, cid) ⇒ - sender ! update(pid, snr)(p ⇒ p.update(confirms = cid +: p.confirms)) - case Replay(pid, fromSnr, toSnr) ⇒ - read(pid, fromSnr, toSnr).foreach(sender ! _) - sender ! ReplaySuccess(maxSequenceNr(pid)) + case WriteMessages(msgs) ⇒ + sender ! msgs.foreach(add) + case WriteConfirmations(cnfs) ⇒ + sender ! cnfs.foreach(cnf ⇒ update(cnf.processorId, cnf.sequenceNr)(p ⇒ p.update(confirms = cnf.channelId +: p.confirms))) + case DeleteMessages(msgIds, false) ⇒ + sender ! msgIds.foreach(msgId ⇒ update(msgId.processorId, msgId.sequenceNr)(_.update(deleted = true))) + case DeleteMessages(msgIds, true) ⇒ + sender ! msgIds.foreach(msgId ⇒ delete(msgId.processorId, msgId.sequenceNr)) + case DeleteMessagesTo(pid, tsnr, false) ⇒ + sender ! (1L to tsnr foreach { snr ⇒ update(pid, snr)(_.update(deleted = true)) }) + case DeleteMessagesTo(pid, tsnr, true) ⇒ + sender ! (1L to tsnr foreach { snr ⇒ delete(pid, snr) }) + case ReplayMessages(pid, fromSnr, toSnr, max) ⇒ + read(pid, fromSnr, toSnr, max).foreach(sender ! _) + sender ! ReplaySuccess + case ReadHighestSequenceNr(processorId, _) ⇒ + sender ! highestSequenceNr(processorId) } } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala new file mode 100644 index 0000000000..a7434a1ebb --- /dev/null +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala @@ -0,0 +1,27 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ + +package akka.persistence.journal.japi + +import scala.concurrent.Future + +import akka.actor.Actor +import akka.japi.Procedure +import akka.persistence.journal.{ AsyncRecovery ⇒ SAsyncReplay } +import akka.persistence.PersistentRepr + +/** + * Java API: asynchronous message replay and sequence number recovery interface. 
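One detail worth calling out in the in-memory store above: replay is now bounded by a Long `max`, but Vector.take accepts an Int, hence the clamp. Its behavior, spelled out as a self-contained check:

  // behavior of the safeLongToInt clamp used by InmemMessages.read
  def safeLongToInt(l: Long): Int =
    if (Int.MaxValue < l) Int.MaxValue else l.toInt

  assert(safeLongToInt(3L) == 3)
  assert(safeLongToInt(Long.MaxValue) == Int.MaxValue)
  // so a Long.MaxValue bound effectively means "replay everything"
  assert(Vector(1, 2, 3).take(safeLongToInt(Long.MaxValue)) == Vector(1, 2, 3))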
+ */ +abstract class AsyncRecovery extends SAsyncReplay with AsyncRecoveryPlugin { this: Actor ⇒ + import context.dispatcher + + final def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) ⇒ Unit) = + doAsyncReplayMessages(processorId, fromSequenceNr, toSequenceNr, max, new Procedure[PersistentRepr] { + def apply(p: PersistentRepr) = replayCallback(p) + }).map(Unit.unbox) + + final def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = + doAsyncReadHighestSequenceNr(processorId, fromSequenceNr: Long).map(_.longValue) +} diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncReplay.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncReplay.scala deleted file mode 100644 index db99821bd9..0000000000 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncReplay.scala +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ - -package akka.persistence.journal.japi - -import java.lang.{ Long ⇒ JLong } - -import scala.concurrent.Future - -import akka.actor.Actor -import akka.japi.Procedure -import akka.persistence.journal.{ AsyncReplay ⇒ SAsyncReplay } -import akka.persistence.PersistentRepr - -/** - * Java API: asynchronous message replay interface. - */ -abstract class AsyncReplay extends SAsyncReplay with AsyncReplayPlugin { this: Actor ⇒ - import context.dispatcher - - final def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit) = - doReplayAsync(processorId, fromSequenceNr, toSequenceNr, new Procedure[PersistentRepr] { - def apply(p: PersistentRepr) = replayCallback(p) - }).map(_.longValue) -} diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala index 58a87ba03c..53cb3aa75b 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala @@ -7,21 +7,24 @@ package akka.persistence.journal.japi import scala.collection.immutable import scala.collection.JavaConverters._ +import akka.persistence._ import akka.persistence.journal.{ AsyncWriteJournal ⇒ SAsyncWriteJournal } -import akka.persistence.PersistentRepr /** * Java API: abstract journal, optimized for asynchronous, non-blocking writes. 
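The Java-API bridge above adapts the Scala replay callback into an akka.japi.Procedure so that Java subclasses only deal with Java types. The same adaptation in isolation (the helper name is ours, for illustration):

  import akka.japi.Procedure

  // how the japi layer wraps a Scala function as a Java Procedure
  def asProcedure[A](f: A ⇒ Unit): Procedure[A] = new Procedure[A] {
    def apply(a: A): Unit = f(a)
  }

  val callback: String ⇒ Unit = s ⇒ println(s"replayed: $s")
  asProcedure(callback).apply("msg-1") // prints "replayed: msg-1"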
*/ -abstract class AsyncWriteJournal extends AsyncReplay with SAsyncWriteJournal with AsyncWritePlugin { +abstract class AsyncWriteJournal extends AsyncRecovery with SAsyncWriteJournal with AsyncWritePlugin { import context.dispatcher - final def writeAsync(persistentBatch: immutable.Seq[PersistentRepr]) = - doWriteAsync(persistentBatch.asJava).map(Unit.unbox) + final def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]) = + doAsyncWriteMessages(messages.asJava).map(Unit.unbox) - final def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) = - doDeleteAsync(processorId, fromSequenceNr, toSequenceNr, permanent).map(Unit.unbox) + final def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) = + doAsyncWriteConfirmations(confirmations.asJava).map(Unit.unbox) - final def confirmAsync(processorId: String, sequenceNr: Long, channelId: String) = - doConfirmAsync(processorId, sequenceNr, channelId).map(Unit.unbox) + final def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) = + doAsyncDeleteMessages(messageIds.asJava, permanent).map(Unit.unbox) + + final def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) = + doAsyncDeleteMessagesTo(processorId, toSequenceNr, permanent).map(Unit.unbox) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/SyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/SyncWriteJournal.scala index 894cbc9cc2..b60922e4c2 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/SyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/SyncWriteJournal.scala @@ -7,19 +7,22 @@ package akka.persistence.journal.japi import scala.collection.immutable import scala.collection.JavaConverters._ +import akka.persistence._ import akka.persistence.journal.{ SyncWriteJournal ⇒ SSyncWriteJournal } -import akka.persistence.PersistentRepr /** * Java API: abstract journal, optimized for synchronous writes. 
*/ -abstract class SyncWriteJournal extends AsyncReplay with SSyncWriteJournal with SyncWritePlugin { - final def write(persistentBatch: immutable.Seq[PersistentRepr]) = - doWrite(persistentBatch.asJava) +abstract class SyncWriteJournal extends AsyncRecovery with SSyncWriteJournal with SyncWritePlugin { + final def writeMessages(messages: immutable.Seq[PersistentRepr]) = + doWriteMessages(messages.asJava) - final def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) = - doDelete(processorId, fromSequenceNr, toSequenceNr, permanent) + final def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) = + doWriteConfirmations(confirmations.asJava) - final def confirm(processorId: String, sequenceNr: Long, channelId: String) = - doConfirm(processorId, sequenceNr, channelId) + final def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) = + doDeleteMessages(messageIds.asJava, permanent) + + final def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) = + doDeleteMessagesTo(processorId, toSequenceNr, permanent) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala index 82147c8b7d..b46700b412 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala @@ -28,14 +28,9 @@ private[persistence] trait LeveldbIdMapping extends Actor { this: LeveldbStore case Some(v) ⇒ v } - private def readIdMap(): Map[String, Int] = { - val iter = leveldbIterator - try { - iter.seek(keyToBytes(idKey(idOffset))) - readIdMap(Map.empty, iter) - } finally { - iter.close() - } + private def readIdMap(): Map[String, Int] = withIterator { iter ⇒ + iter.seek(keyToBytes(idKey(idOffset))) + readIdMap(Map.empty, iter) } private def readIdMap(pathMap: Map[String, Int], iter: DBIterator): Map[String, Int] = { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbReplay.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala similarity index 55% rename from akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbReplay.scala rename to akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala index adb69fc915..ba0370833e 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbReplay.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala @@ -8,27 +8,29 @@ package akka.persistence.journal.leveldb import scala.concurrent.Future import akka.persistence._ -import akka.persistence.journal.AsyncReplay +import akka.persistence.journal.AsyncRecovery +import org.iq80.leveldb.DBIterator /** * INTERNAL API. * - * LevelDB backed message replay. + * LevelDB backed message replay and sequence number recovery. 
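readIdMap now leans on a withIterator helper (added to LeveldbStore further down in this diff) instead of managing the iterator by hand. The underlying loan pattern, reduced to its core; the generic helper below is ours, for illustration:

  import java.io.Closeable

  // the resource is always closed, even when the body throws
  def withResource[R <: Closeable, A](open: ⇒ R)(body: R ⇒ A): A = {
    val resource = open
    try body(resource) finally resource.close()
  }
  // e.g. withResource(new java.io.FileInputStream("ids"))(_.read())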
*/ -private[persistence] trait LeveldbReplay extends AsyncReplay { this: LeveldbStore ⇒ +private[persistence] trait LeveldbRecovery extends AsyncRecovery { this: LeveldbStore ⇒ import Key._ private lazy val replayDispatcherId = config.getString("replay-dispatcher") private lazy val replayDispatcher = context.system.dispatchers.lookup(replayDispatcherId) - def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Long] = - Future(replay(numericId(processorId), fromSequenceNr: Long, toSequenceNr)(replayCallback))(replayDispatcher) + def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = + Future(readHighestSequenceNr(numericId(processorId)))(replayDispatcher) - def replay(processorId: Int, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: PersistentRepr ⇒ Unit): Long = { - val iter = leveldbIterator + def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] = + Future(replayMessages(numericId(processorId), fromSequenceNr: Long, toSequenceNr, max: Long)(replayCallback))(replayDispatcher) + def replayMessages(processorId: Int, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Unit = { @scala.annotation.tailrec - def go(key: Key, replayCallback: PersistentRepr ⇒ Unit) { + def go(iter: DBIterator, key: Key, ctr: Long, replayCallback: PersistentRepr ⇒ Unit) { if (iter.hasNext) { val nextEntry = iter.next() val nextKey = keyFromBytes(nextEntry.getKey) @@ -36,31 +38,33 @@ private[persistence] trait LeveldbReplay extends AsyncReplay { this: LeveldbStor // end iteration here } else if (nextKey.channelId != 0) { // phantom confirmation (just advance iterator) - go(nextKey, replayCallback) + go(iter, nextKey, ctr, replayCallback) } else if (key.processorId == nextKey.processorId) { val msg = persistentFromBytes(nextEntry.getValue) - val del = deletion(nextKey) - val cnf = confirms(nextKey, Nil) - replayCallback(msg.update(confirms = cnf, deleted = del)) - go(nextKey, replayCallback) + val del = deletion(iter, nextKey) + val cnf = confirms(iter, nextKey, Nil) + if (ctr < max) { + replayCallback(msg.update(confirms = cnf, deleted = del)) + go(iter, nextKey, ctr + 1L, replayCallback) + } } } } @scala.annotation.tailrec - def confirms(key: Key, channelIds: List[String]): List[String] = { + def confirms(iter: DBIterator, key: Key, channelIds: List[String]): List[String] = { if (iter.hasNext) { val nextEntry = iter.peekNext() val nextKey = keyFromBytes(nextEntry.getKey) if (key.processorId == nextKey.processorId && key.sequenceNr == nextKey.sequenceNr) { val nextValue = new String(nextEntry.getValue, "UTF-8") iter.next() - confirms(nextKey, nextValue :: channelIds) + confirms(iter, nextKey, nextValue :: channelIds) } else channelIds } else channelIds } - def deletion(key: Key): Boolean = { + def deletion(iter: DBIterator, key: Key): Boolean = { if (iter.hasNext) { val nextEntry = iter.peekNext() val nextKey = keyFromBytes(nextEntry.getKey) @@ -71,17 +75,14 @@ private[persistence] trait LeveldbReplay extends AsyncReplay { this: LeveldbStor } else false } - try { + withIterator { iter ⇒ val startKey = Key(processorId, if (fromSequenceNr < 1L) 1L else fromSequenceNr, 0) iter.seek(keyToBytes(startKey)) - go(startKey, replayCallback) - maxSequenceNr(processorId) - } finally { - iter.close() + go(iter, startKey, 0L, replayCallback) } } - def maxSequenceNr(processorId: 
Int) = { + def readHighestSequenceNr(processorId: Int) = { leveldb.get(keyToBytes(counterKey(processorId)), leveldbSnapshot) match { case null ⇒ 0L case bytes ⇒ counterFromBytes(bytes) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala index 607a1d65f7..e1d10d2243 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala @@ -20,7 +20,7 @@ import akka.serialization.SerializationExtension /** * INTERNAL API. */ -private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with LeveldbReplay { +private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with LeveldbRecovery { val configPath: String val config = context.system.settings.config.getConfig(configPath) @@ -44,36 +44,47 @@ private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with import Key._ - def write(persistentBatch: immutable.Seq[PersistentRepr]) = - withBatch(batch ⇒ persistentBatch.foreach(persistent ⇒ addToBatch(persistent, batch))) + def writeMessages(messages: immutable.Seq[PersistentRepr]) = + withBatch(batch ⇒ messages.foreach(message ⇒ addToMessageBatch(message, batch))) - def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) = withBatch { batch ⇒ - val nid = numericId(processorId) - if (permanent) fromSequenceNr to toSequenceNr foreach { sequenceNr ⇒ - batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) // TODO: delete confirmations and deletion markers, if any. - } - else fromSequenceNr to toSequenceNr foreach { sequenceNr ⇒ - batch.put(keyToBytes(deletionKey(nid, sequenceNr)), Array.empty[Byte]) + def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) = + withBatch(batch ⇒ confirmations.foreach(confirmation ⇒ addToConfirmationBatch(confirmation, batch))) + + def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) = withBatch { batch ⇒ + messageIds foreach { messageId ⇒ + if (permanent) batch.delete(keyToBytes(Key(numericId(messageId.processorId), messageId.sequenceNr, 0))) + else batch.put(keyToBytes(deletionKey(numericId(messageId.processorId), messageId.sequenceNr)), Array.emptyByteArray) } } - def confirm(processorId: String, sequenceNr: Long, channelId: String) { - leveldb.put(keyToBytes(Key(numericId(processorId), sequenceNr, numericId(channelId))), channelId.getBytes("UTF-8")) + def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) = withBatch { batch ⇒ + val nid = numericId(processorId) + + // seek to first existing message + val fromSequenceNr = withIterator { iter ⇒ + val startKey = Key(nid, 1L, 0) + iter.seek(keyToBytes(startKey)) + if (iter.hasNext) keyFromBytes(iter.peekNext().getKey).sequenceNr else Long.MaxValue + } + + fromSequenceNr to toSequenceNr foreach { sequenceNr ⇒ + if (permanent) batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) // TODO: delete confirmations and deletion markers, if any. 
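The seek above is a small but useful optimization for deleteMessagesTo: instead of always deleting from sequence number 1, it starts at the first key that still exists, and an empty store yields Long.MaxValue, i.e. an empty deletion range. The arithmetic, sketched against a plain sorted set:

  import scala.collection.SortedSet

  def firstExisting(snrs: SortedSet[Long]): Long =
    snrs.headOption.getOrElse(Long.MaxValue)

  // deletion range starts at the first stored sequence number ...
  assert((firstExisting(SortedSet(5L, 6L)) to 7L) == (5L to 7L))
  // ... and is empty when nothing is stored at all
  assert((firstExisting(SortedSet.empty[Long]) to 7L).isEmpty)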
+ else batch.put(keyToBytes(deletionKey(nid, sequenceNr)), Array.emptyByteArray) + } } def leveldbSnapshot = leveldbReadOptions.snapshot(leveldb.getSnapshot) - def leveldbIterator = leveldb.iterator(leveldbSnapshot) - def persistentToBytes(p: PersistentRepr): Array[Byte] = serialization.serialize(p).get - def persistentFromBytes(a: Array[Byte]): PersistentRepr = serialization.deserialize(a, classOf[PersistentRepr]).get - - private def addToBatch(persistent: PersistentRepr, batch: WriteBatch): Unit = { - val nid = numericId(persistent.processorId) - batch.put(keyToBytes(counterKey(nid)), counterToBytes(persistent.sequenceNr)) - batch.put(keyToBytes(Key(nid, persistent.sequenceNr, 0)), persistentToBytes(persistent)) + def withIterator[R](body: DBIterator ⇒ R): R = { + val iterator = leveldb.iterator(leveldbSnapshot) + try { + body(iterator) + } finally { + iterator.close() + } } - private def withBatch[R](body: WriteBatch ⇒ R): R = { + def withBatch[R](body: WriteBatch ⇒ R): R = { val batch = leveldb.createWriteBatch() try { val r = body(batch) @@ -84,6 +95,21 @@ private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with } } + def persistentToBytes(p: PersistentRepr): Array[Byte] = serialization.serialize(p).get + def persistentFromBytes(a: Array[Byte]): PersistentRepr = serialization.deserialize(a, classOf[PersistentRepr]).get + + private def addToMessageBatch(persistent: PersistentRepr, batch: WriteBatch): Unit = { + val nid = numericId(persistent.processorId) + batch.put(keyToBytes(counterKey(nid)), counterToBytes(persistent.sequenceNr)) + batch.put(keyToBytes(Key(nid, persistent.sequenceNr, 0)), persistentToBytes(persistent)) + } + + private def addToConfirmationBatch(confirmation: PersistentConfirmation, batch: WriteBatch): Unit = { + val npid = numericId(confirmation.processorId) + val ncid = numericId(confirmation.channelId) + batch.put(keyToBytes(Key(npid, confirmation.sequenceNr, ncid)), confirmation.channelId.getBytes("UTF-8")) + } + override def preStart() { leveldb = leveldbFactory.open(leveldbDir, if (nativeLeveldb) leveldbOptions else leveldbOptions.compressionType(CompressionType.NONE)) super.preStart() @@ -104,17 +130,14 @@ class SharedLeveldbStore extends { val configPath = "akka.persistence.journal.le import AsyncWriteTarget._ def receive = { - case WriteBatch(pb) ⇒ sender ! write(pb) - case Delete(pid, fsnr, tsnr, permanent) ⇒ sender ! delete(pid, fsnr, tsnr, permanent) - case Confirm(pid, snr, cid) ⇒ sender ! confirm(pid, snr, cid) - case Replay(pid, fromSnr, toSnr) ⇒ - val npid = numericId(pid) - val res = for { - _ ← Try(replay(npid, fromSnr, toSnr)(sender ! _)) - max ← Try(maxSequenceNr(npid)) - } yield max - res match { - case Success(max) ⇒ sender ! ReplaySuccess(max) + case WriteMessages(msgs) ⇒ sender ! writeMessages(msgs) + case WriteConfirmations(cnfs) ⇒ sender ! writeConfirmations(cnfs) + case DeleteMessages(messageIds, permanent) ⇒ sender ! deleteMessages(messageIds, permanent) + case DeleteMessagesTo(pid, tsnr, permanent) ⇒ sender ! deleteMessagesTo(pid, tsnr, permanent) + case ReadHighestSequenceNr(pid, fromSequenceNr) ⇒ sender ! readHighestSequenceNr(numericId(pid)) + case ReplayMessages(pid, fromSnr, toSnr, max) ⇒ + Try(replayMessages(numericId(pid), fromSnr, toSnr, max)(sender ! _)) match { + case Success(max) ⇒ sender ! ReplaySuccess case Failure(cause) ⇒ sender ! 
ReplayFailure(cause) } } diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala index a80e57f55c..b8aed696dd 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala @@ -8,12 +8,10 @@ import scala.language.existentials import com.google.protobuf._ -import akka.actor.ExtendedActorSystem +import akka.actor.{ ActorPath, ExtendedActorSystem } import akka.japi.Util.immutableSeq import akka.persistence._ -import akka.persistence.JournalProtocol.Confirm import akka.persistence.serialization.MessageFormats._ -import akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy import akka.serialization._ /** @@ -31,7 +29,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { val PersistentReprClass = classOf[PersistentRepr] val PersistentImplClass = classOf[PersistentImpl] val ConfirmablePersistentImplClass = classOf[ConfirmablePersistentImpl] - val ConfirmClass = classOf[Confirm] + val DeliveredByTransientChannelClass = classOf[DeliveredByChannel] + val DeliveredByPersistentChannelClass = classOf[DeliveredByPersistentChannel] val DeliverClass = classOf[Deliver] def identifier: Int = 7 @@ -42,11 +41,12 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { * serialization of a persistent message's payload to a matching `akka.serialization.Serializer`. */ def toBinary(o: AnyRef): Array[Byte] = o match { - case b: PersistentBatch ⇒ persistentMessageBatchBuilder(b).build().toByteArray - case p: PersistentRepr ⇒ persistentMessageBuilder(p).build().toByteArray - case c: Confirm ⇒ confirmMessageBuilder(c).build().toByteArray - case d: Deliver ⇒ deliverMessageBuilder(d).build.toByteArray - case _ ⇒ throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}") + case b: PersistentBatch ⇒ persistentMessageBatchBuilder(b).build().toByteArray + case p: PersistentRepr ⇒ persistentMessageBuilder(p).build().toByteArray + case c: DeliveredByChannel ⇒ deliveredMessageBuilder(c).build().toByteArray + case c: DeliveredByPersistentChannel ⇒ deliveredMessageBuilder(c).build().toByteArray + case d: Deliver ⇒ deliverMessageBuilder(d).build.toByteArray + case _ ⇒ throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}") } /** @@ -56,13 +56,14 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): Message = manifest match { case None ⇒ persistent(PersistentMessage.parseFrom(bytes)) case Some(c) ⇒ c match { - case PersistentImplClass ⇒ persistent(PersistentMessage.parseFrom(bytes)) - case ConfirmablePersistentImplClass ⇒ persistent(PersistentMessage.parseFrom(bytes)) - case PersistentReprClass ⇒ persistent(PersistentMessage.parseFrom(bytes)) - case PersistentBatchClass ⇒ persistentBatch(PersistentMessageBatch.parseFrom(bytes)) - case ConfirmClass ⇒ confirm(ConfirmMessage.parseFrom(bytes)) - case DeliverClass ⇒ deliver(DeliverMessage.parseFrom(bytes)) - case _ ⇒ throw new IllegalArgumentException(s"Can't deserialize object of type ${c}") + case PersistentImplClass ⇒ persistent(PersistentMessage.parseFrom(bytes)) + case ConfirmablePersistentImplClass ⇒ persistent(PersistentMessage.parseFrom(bytes)) + case PersistentReprClass ⇒ 
persistent(PersistentMessage.parseFrom(bytes)) + case PersistentBatchClass ⇒ persistentBatch(PersistentMessageBatch.parseFrom(bytes)) + case DeliveredByTransientChannelClass ⇒ delivered(DeliveredMessage.parseFrom(bytes)) + case DeliveredByPersistentChannelClass ⇒ delivered(DeliveredMessage.parseFrom(bytes)) + case DeliverClass ⇒ deliver(DeliverMessage.parseFrom(bytes)) + case _ ⇒ throw new IllegalArgumentException(s"Can't deserialize object of type ${c}") } } @@ -73,12 +74,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { private def deliverMessageBuilder(deliver: Deliver) = { val builder = DeliverMessage.newBuilder builder.setPersistent(persistentMessageBuilder(deliver.persistent.asInstanceOf[PersistentRepr])) - builder.setDestination(Serialization.serializedActorPath(deliver.destination)) - deliver.resolve match { - case Resolve.Off ⇒ builder.setResolve(DeliverMessage.ResolveStrategy.Off) - case Resolve.Sender ⇒ builder.setResolve(DeliverMessage.ResolveStrategy.Sender) - case Resolve.Destination ⇒ builder.setResolve(DeliverMessage.ResolveStrategy.Destination) - } + builder.setDestination(deliver.destination.toString) + builder } private def persistentMessageBatchBuilder(persistentBatch: PersistentBatch) = { @@ -91,7 +88,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { val builder = PersistentMessage.newBuilder if (persistent.processorId != Undefined) builder.setProcessorId(persistent.processorId) - if (persistent.confirmMessage != null) builder.setConfirmMessage(confirmMessageBuilder(persistent.confirmMessage)) + if (persistent.confirmMessage != null) builder.setConfirmMessage(deliveredMessageBuilder(persistent.confirmMessage)) if (persistent.confirmTarget != null) builder.setConfirmTarget(Serialization.serializedActorPath(persistent.confirmTarget)) if (persistent.sender != null) builder.setSender(Serialization.serializedActorPath(persistent.sender)) @@ -100,7 +97,6 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { builder.setPayload(persistentPayloadBuilder(persistent.payload.asInstanceOf[AnyRef])) builder.setSequenceNr(persistent.sequenceNr) builder.setDeleted(persistent.deleted) - builder.setResolved(persistent.resolved) builder.setRedeliveries(persistent.redeliveries) builder.setConfirmable(persistent.confirmable) builder @@ -117,16 +113,19 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { builder } - private def confirmMessageBuilder(confirm: Confirm) = { - val builder = ConfirmMessage.newBuilder + private def deliveredMessageBuilder(delivered: Delivered) = { + val builder = DeliveredMessage.newBuilder - if (confirm.channelEndpoint != null) builder.setChannelEndpoint(Serialization.serializedActorPath(confirm.channelEndpoint)) + if (delivered.channel != null) builder.setChannel(Serialization.serializedActorPath(delivered.channel)) - builder.setProcessorId(confirm.processorId) - builder.setMessageSequenceNr(confirm.messageSequenceNr) - builder.setChannelId(confirm.channelId) - builder.setWrapperSequenceNr(confirm.wrapperSequenceNr) - builder + builder.setChannelId(delivered.channelId) + builder.setPersistentSequenceNr(delivered.persistentSequenceNr) + builder.setDeliverySequenceNr(delivered.deliverySequenceNr) + + delivered match { + case c: DeliveredByChannel ⇒ builder.setProcessorId(c.processorId) + case _ ⇒ builder + } } // @@ -136,12 +135,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { private def 
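A hypothetical round trip through this serializer shows the user-visible effect of the protocol change: a Deliver destination survives (de)serialization as an ActorPath (restored with ActorPath.fromString, see below) instead of being resolved to an ActorRef:

  import akka.actor.ActorSystem
  import akka.persistence.{ Deliver, Persistent }
  import akka.serialization.SerializationExtension

  val system = ActorSystem("example")
  val serialization = SerializationExtension(system)

  val deliver = Deliver(Persistent("payload"), system.deadLetters.path)
  val serializer = serialization.findSerializerFor(deliver)
  val bytes = serializer.toBinary(deliver)

  val restored = serializer.fromBinary(bytes, Some(classOf[Deliver])).asInstanceOf[Deliver]
  assert(restored.destination == deliver.destination) // an ActorPath, not an ActorRef
  system.shutdown()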
deliver(deliverMessage: DeliverMessage): Deliver = { Deliver( persistent(deliverMessage.getPersistent), - system.provider.resolveActorRef(deliverMessage.getDestination), - deliverMessage.getResolve match { - case ResolveStrategy.Off ⇒ Resolve.Off - case ResolveStrategy.Sender ⇒ Resolve.Sender - case ResolveStrategy.Destination ⇒ Resolve.Destination - }) + ActorPath.fromString(deliverMessage.getDestination)) } private def persistentBatch(persistentMessageBatch: PersistentMessageBatch): PersistentBatch = @@ -153,11 +147,10 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { persistentMessage.getSequenceNr, if (persistentMessage.hasProcessorId) persistentMessage.getProcessorId else Undefined, persistentMessage.getDeleted, - persistentMessage.getResolved, persistentMessage.getRedeliveries, immutableSeq(persistentMessage.getConfirmsList), persistentMessage.getConfirmable, - if (persistentMessage.hasConfirmMessage) confirm(persistentMessage.getConfirmMessage) else null, + if (persistentMessage.hasConfirmMessage) delivered(persistentMessage.getConfirmMessage) else null, if (persistentMessage.hasConfirmTarget) system.provider.resolveActorRef(persistentMessage.getConfirmTarget) else null, if (persistentMessage.hasSender) system.provider.resolveActorRef(persistentMessage.getSender) else null) } @@ -172,12 +165,22 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer { payloadClass).get } - private def confirm(confirmMessage: ConfirmMessage): Confirm = { - Confirm( - confirmMessage.getProcessorId, - confirmMessage.getMessageSequenceNr, - confirmMessage.getChannelId, - confirmMessage.getWrapperSequenceNr, - if (confirmMessage.hasChannelEndpoint) system.provider.resolveActorRef(confirmMessage.getChannelEndpoint) else null) + private def delivered(deliveredMessage: DeliveredMessage): Delivered = { + val channel = if (deliveredMessage.hasChannel) system.provider.resolveActorRef(deliveredMessage.getChannel) else null + + if (deliveredMessage.hasProcessorId) { + DeliveredByChannel( + deliveredMessage.getProcessorId, + deliveredMessage.getChannelId, + deliveredMessage.getPersistentSequenceNr, + deliveredMessage.getDeliverySequenceNr, + channel) + } else { + DeliveredByPersistentChannel( + deliveredMessage.getChannelId, + deliveredMessage.getPersistentSequenceNr, + deliveredMessage.getDeliverySequenceNr, + channel) + } } } diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala index 37b06db5bc..5297e0928a 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala @@ -19,6 +19,7 @@ trait SnapshotStore extends Actor { import context.dispatcher private val extension = Persistence(context.system) + private val publish = extension.settings.internal.publishPluginCommands final def receive = { case LoadSnapshot(processorId, criteria, toSequenceNr) ⇒ @@ -44,10 +45,10 @@ trait SnapshotStore extends Actor { sender ! 
evt // sender is processor case d @ DeleteSnapshot(metadata) ⇒ delete(metadata) - if (extension.publishPluginCommands) context.system.eventStream.publish(d) + if (publish) context.system.eventStream.publish(d) case d @ DeleteSnapshots(processorId, criteria) ⇒ delete(processorId, criteria) - if (extension.publishPluginCommands) context.system.eventStream.publish(d) + if (publish) context.system.eventStream.publish(d) } //#snapshot-store-plugin-api diff --git a/akka-persistence/src/test/scala/akka/persistence/ChannelSpec.scala b/akka-persistence/src/test/scala/akka/persistence/ChannelSpec.scala index 9ea005931a..f6ed598f11 100644 --- a/akka-persistence/src/test/scala/akka/persistence/ChannelSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/ChannelSpec.scala @@ -12,8 +12,6 @@ import com.typesafe.config._ import akka.actor._ import akka.testkit._ -import akka.persistence.JournalProtocol.Confirm - object ChannelSpec { class TestDestination extends Actor { def receive = { @@ -36,6 +34,12 @@ object ChannelSpec { cp.confirm() } } + + class TestListener(probe: ActorRef) extends Actor { + def receive = { + case RedeliverFailure(messages) ⇒ messages.foreach(probe ! _.payload) + } + } } abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with PersistenceSpec with ImplicitSender { @@ -56,52 +60,35 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist super.afterEach() } - def redeliverChannelSettings: ChannelSettings = - ChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds) + private def redeliverChannelSettings(listener: Option[ActorRef]): ChannelSettings = + ChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds, redeliverFailureListener = listener) def createDefaultTestChannel(): ActorRef = - system.actorOf(Channel.props(name, ChannelSettings())) + system.actorOf(Channel.props(s"${name}-default", ChannelSettings())) def createRedeliverTestChannel(): ActorRef = - system.actorOf(Channel.props(name, redeliverChannelSettings)) + system.actorOf(Channel.props(s"${name}-redeliver", redeliverChannelSettings(None))) + + def createRedeliverTestChannel(listener: Option[ActorRef]): ActorRef = + system.actorOf(Channel.props(s"${name}-redeliver-listener", redeliverChannelSettings(listener))) def subscribeToConfirmation(probe: TestProbe): Unit = - system.eventStream.subscribe(probe.ref, classOf[Confirm]) + system.eventStream.subscribe(probe.ref, classOf[Delivered]) def awaitConfirmation(probe: TestProbe): Unit = - probe.expectMsgType[Confirm] + probe.expectMsgType[Delivered] def actorRefFor(topLevelName: String) = extension.system.provider.resolveActorRef(RootActorPath(Address("akka", system.name)) / "user" / topLevelName) "A channel" must { - "must resolve sender references and preserve message order" in { - val destination = system.actorOf(Props[TestDestination]) - - val empty = actorRefFor("testSender") // will be an EmptyLocalActorRef - val sender = system.actorOf(Props(classOf[TestReceiver], testActor), "testSender") - - // replayed message (resolved = false) and invalid sender reference - defaultTestChannel tell (Deliver(PersistentRepr("a", resolved = false), destination, Resolve.Sender), empty) - - // new messages (resolved = true) and valid sender references - defaultTestChannel tell (Deliver(Persistent("b"), destination), sender) - defaultTestChannel tell (Deliver(Persistent("c"), destination), sender) - - expectMsg("a") - expectMsg("b") - expectMsg("c") - } "must resolve destination references and preserve 
message order" in { val empty = actorRefFor("testDestination") // will be an EmptyLocalActorRef val destination = system.actorOf(Props(classOf[TestReceiver], testActor), "testDestination") - // replayed message (resolved = false) and invalid destination reference - defaultTestChannel ! Deliver(PersistentRepr("a", resolved = false), empty, Resolve.Destination) - - // new messages (resolved = true) and valid destination references - defaultTestChannel ! Deliver(Persistent("b"), destination) - defaultTestChannel ! Deliver(Persistent("c"), destination) + defaultTestChannel ! Deliver(PersistentRepr("a"), empty.path) + defaultTestChannel ! Deliver(Persistent("b"), destination.path) + defaultTestChannel ! Deliver(Persistent("c"), destination.path) expectMsg("a") expectMsg("b") @@ -113,7 +100,7 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist subscribeToConfirmation(confirmProbe) - defaultTestChannel ! Deliver(Persistent("a"), destination) + defaultTestChannel ! Deliver(Persistent("a"), destination.path) awaitConfirmation(confirmProbe) } @@ -123,9 +110,9 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist subscribeToConfirmation(confirmProbe) - defaultTestChannel ! Deliver(Persistent("a"), destination) - defaultTestChannel ! Deliver(Persistent("boom"), destination) - defaultTestChannel ! Deliver(Persistent("b"), destination) + defaultTestChannel ! Deliver(Persistent("a"), destination.path) + defaultTestChannel ! Deliver(Persistent("boom"), destination.path) + defaultTestChannel ! Deliver(Persistent("b"), destination.path) awaitConfirmation(confirmProbe) awaitConfirmation(confirmProbe) @@ -136,7 +123,7 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist subscribeToConfirmation(confirmProbe) - defaultTestChannel ! Deliver(PersistentRepr("a", confirmable = true), destination) + defaultTestChannel ! Deliver(PersistentRepr("a", confirmable = true), destination.path) expectMsgPF() { case m @ ConfirmablePersistent("a", _, _) ⇒ m.confirm() } awaitConfirmation(confirmProbe) @@ -144,21 +131,21 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist "redeliver on missing confirmation" in { val probe = TestProbe() - redeliverTestChannel ! Deliver(Persistent("b"), probe.ref) + redeliverTestChannel ! Deliver(Persistent("b"), probe.ref.path) probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(0) } probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(1) } probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(2); m.confirm() } } "redeliver in correct relative order" in { - val deliveries = redeliverChannelSettings.redeliverMax + 1 - val interval = redeliverChannelSettings.redeliverInterval.toMillis / 5 * 4 + val deliveries = redeliverChannelSettings(None).redeliverMax + 1 + val interval = redeliverChannelSettings(None).redeliverInterval.toMillis / 5 * 4 val probe = TestProbe() val cycles = 9 1 to cycles foreach { i ⇒ - redeliverTestChannel ! Deliver(Persistent(i), probe.ref) + redeliverTestChannel ! Deliver(Persistent(i), probe.ref.path) Thread.sleep(interval) } @@ -176,13 +163,35 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist "redeliver not more than redeliverMax on missing confirmation" in { val probe = TestProbe() - redeliverTestChannel ! Deliver(PersistentRepr("a"), probe.ref) + redeliverTestChannel ! 
Deliver(PersistentRepr("a"), probe.ref.path) probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(0) } probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(1) } probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(2) } probe.expectNoMsg(300 milliseconds) } + "preserve message order to the same destination" in { + val probe = TestProbe() + val destination = system.actorOf(Props(classOf[TestReceiver], probe.ref)) + + 1 to 10 foreach { i ⇒ + defaultTestChannel ! Deliver(PersistentRepr(s"test-${i}"), destination.path) + } + + 1 to 10 foreach { i ⇒ + probe.expectMsg(s"test-${i}") + } + } + "notify redelivery failure listener" in { + val probe = TestProbe() + val listener = system.actorOf(Props(classOf[TestListener], probe.ref)) + val channel = createRedeliverTestChannel(Some(listener)) + + 1 to 3 foreach { i ⇒ channel ! Deliver(Persistent(i), system.deadLetters.path) } + + probe.expectMsgAllOf(1, 2, 3) + system.stop(channel) + } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/FailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/FailureSpec.scala index 953653fa5f..1fdcf76183 100644 --- a/akka-persistence/src/test/scala/akka/persistence/FailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/FailureSpec.scala @@ -21,8 +21,10 @@ object FailureSpec { akka.persistence.destination.chaos.confirm-failure-rate = 0.3 akka.persistence.journal.plugin = "akka.persistence.journal.chaos" akka.persistence.journal.chaos.write-failure-rate = 0.3 + akka.persistence.journal.chaos.confirm-failure-rate = 0.2 akka.persistence.journal.chaos.delete-failure-rate = 0.3 - akka.persistence.journal.chaos.replay-failure-rate = 0.3 + akka.persistence.journal.chaos.replay-failure-rate = 0.25 + akka.persistence.journal.chaos.read-highest-failure-rate = 0.1 akka.persistence.journal.chaos.class = akka.persistence.journal.chaos.ChaosJournal akka.persistence.snapshot-store.local.dir = "target/snapshots-failure-spec/" """) @@ -70,7 +72,7 @@ object FailureSpec { throw new TestException(debugMessage(s"rejected payload ${i}")) } else { add(i) - channel forward Deliver(p, destination) + channel forward Deliver(p, destination.path) log.debug(debugMessage(s"processed payload ${i}")) } case PersistenceFailure(i: Int, _, _) ⇒ diff --git a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala index 250a0cbd6c..b42a6979ec 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala @@ -15,6 +15,7 @@ object PerformanceSpec { """ akka.persistence.performance.cycles.warmup = 300 akka.persistence.performance.cycles.load = 1000 + akka.persistence.publish-confirmations = on """ case object StartMeasure @@ -166,15 +167,21 @@ class PerformanceSpec extends AkkaSpec(PersistenceSpec.config("leveldb", "Perfor def stressPersistentChannel(): Unit = { val channel = system.actorOf(PersistentChannel.props()) val destination = system.actorOf(Props[PerformanceTestDestination]) - 1 to warmupCycles foreach { i ⇒ channel ! Deliver(Persistent(s"msg${i}"), destination) } - channel ! Deliver(Persistent(StartMeasure), destination) - 1 to loadCycles foreach { i ⇒ channel ! Deliver(Persistent(s"msg${i}"), destination) } - channel ! 
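The redelivery failure listener exercised by the test above is plain application code; a sketch for real use (actor and channel names are ours):

  import scala.concurrent.duration._
  import akka.actor._
  import akka.persistence._

  // receives the messages a channel has given up on after redeliverMax attempts
  class GaveUpListener extends Actor with ActorLogging {
    def receive = {
      case RedeliverFailure(messages) ⇒
        messages.foreach(m ⇒ log.warning("delivery failed for {}", m.payload))
    }
  }

  // assuming `system: ActorSystem` is in scope
  def failureAwareChannel(system: ActorSystem): ActorRef = {
    val listener = system.actorOf(Props[GaveUpListener])
    system.actorOf(Channel.props("orders-channel", ChannelSettings(
      redeliverMax = 2,
      redeliverInterval = 100.millis,
      redeliverFailureListener = Some(listener))))
  }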
Deliver(Persistent(StopMeasure), destination) + 1 to warmupCycles foreach { i ⇒ channel ! Deliver(PersistentRepr(s"msg${i}", processorId = "test"), destination.path) } + channel ! Deliver(Persistent(StartMeasure), destination.path) + 1 to loadCycles foreach { i ⇒ channel ! Deliver(PersistentRepr(s"msg${i}", processorId = "test"), destination.path) } + channel ! Deliver(Persistent(StopMeasure), destination.path) expectMsgPF(100 seconds) { - case throughput: Double ⇒ println(f"\nthroughput = $throughput%.2f persistent commands per second") + case throughput: Double ⇒ println(f"\nthroughput = $throughput%.2f persistent messages per second") } } + def subscribeToConfirmation(probe: TestProbe): Unit = + system.eventStream.subscribe(probe.ref, classOf[DeliveredByPersistentChannel]) + + def awaitConfirmation(probe: TestProbe): Unit = + probe.expectMsgType[DeliveredByPersistentChannel] + "A command sourced processor" should { "have some reasonable throughput" in { stressCommandsourcedProcessor(None) @@ -198,7 +205,14 @@ class PerformanceSpec extends AkkaSpec(PersistenceSpec.config("leveldb", "Perfor "A persistent channel" should { "have some reasonable throughput" in { + val probe = TestProbe() + subscribeToConfirmation(probe) + stressPersistentChannel() + + probe.fishForMessage(100.seconds) { + case DeliveredByPersistentChannel(_, snr, _, _) ⇒ snr == warmupCycles + loadCycles + 2 + } } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala index a1f180a43c..ee5f13b7c7 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala @@ -50,6 +50,7 @@ object PersistenceSpec { s""" akka.actor.serialize-creators = ${serialization} akka.actor.serialize-messages = ${serialization} + akka.persistence.publish-confirmations = on akka.persistence.publish-plugin-commands = on akka.persistence.journal.plugin = "akka.persistence.journal.${plugin}" akka.persistence.journal.leveldb.dir = "target/journal-${test}" diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentChannelSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentChannelSpec.scala index 6e7d57e57d..b0db9fc4e2 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentChannelSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentChannelSpec.scala @@ -12,88 +12,129 @@ import com.typesafe.config._ import akka.actor._ import akka.testkit._ +object PersistentChannelSpec { + class SlowDestination(probe: ActorRef, maxReceived: Long) extends Actor { + import context.dispatcher + + val delay = 100.millis + var received = Vector.empty[ConfirmablePersistent] + + def receive = { + case cp: ConfirmablePersistent ⇒ + if (received.isEmpty) context.system.scheduler.scheduleOnce(delay, self, "confirm") + received :+= cp + case "confirm" ⇒ + if (received.size > maxReceived) probe ! s"number of received messages too high: ${received.size}" + else probe ! 
received.head.payload + received.head.confirm() + received = received.tail + if (received.nonEmpty) context.system.scheduler.scheduleOnce(delay, self, "confirm") + } + } +} + abstract class PersistentChannelSpec(config: Config) extends ChannelSpec(config) { - override def redeliverChannelSettings: PersistentChannelSettings = - PersistentChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds) + import PersistentChannelSpec._ + + private def redeliverChannelSettings(listener: Option[ActorRef]): PersistentChannelSettings = + PersistentChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds, redeliverFailureListener = listener) + + private def createDefaultTestChannel(name: String): ActorRef = + system.actorOf(PersistentChannel.props(s"${name}-default", PersistentChannelSettings())) override def createDefaultTestChannel(): ActorRef = - system.actorOf(PersistentChannel.props(name, PersistentChannelSettings())) + createDefaultTestChannel(name) override def createRedeliverTestChannel(): ActorRef = - system.actorOf(PersistentChannel.props(name, redeliverChannelSettings)) + system.actorOf(PersistentChannel.props(s"${name}-redeliver", redeliverChannelSettings(None))) + + override def createRedeliverTestChannel(listener: Option[ActorRef]): ActorRef = + system.actorOf(PersistentChannel.props(s"${name}-redeliver-listener", redeliverChannelSettings(listener))) "A persistent channel" must { - "support disabling and re-enabling delivery" in { - val confirmProbe = TestProbe() - - subscribeToConfirmation(confirmProbe) - - defaultTestChannel ! Deliver(Persistent("a"), testActor) - - expectMsgPF() { case m @ ConfirmablePersistent("a", _, _) ⇒ m.confirm() } - awaitConfirmation(confirmProbe) - - defaultTestChannel ! DisableDelivery - defaultTestChannel ! Deliver(Persistent("b"), testActor) - defaultTestChannel ! EnableDelivery - defaultTestChannel ! Deliver(Persistent("c"), testActor) - - expectMsgPF() { case m @ ConfirmablePersistent("b", _, _) ⇒ m.confirm() } - expectMsgPF() { case m @ ConfirmablePersistent("c", _, _) ⇒ m.confirm() } - } "support Persistent replies to Deliver senders" in { val channel1 = system.actorOf(PersistentChannel.props(s"${name}-with-reply", PersistentChannelSettings(replyPersistent = true))) - channel1 ! Deliver(Persistent("a"), system.deadLetters) - expectMsgPF() { case Persistent("a", 1) ⇒ } + channel1 ! Deliver(Persistent("a"), system.deadLetters.path) + expectMsgPF() { case Persistent("a", _) ⇒ } - channel1 ! Deliver(PersistentRepr("b", sequenceNr = 13), system.deadLetters) + channel1 ! Deliver(PersistentRepr("b", sequenceNr = 13), system.deadLetters.path) expectMsgPF() { case Persistent("b", 13) ⇒ } system.stop(channel1) } - "must not modify certain persistent message field" in { + "not modify certain persistent message fields" in { val persistent1 = PersistentRepr(payload = "a", processorId = "p1", confirms = List("c1", "c2"), sender = defaultTestChannel, sequenceNr = 13) val persistent2 = PersistentRepr(payload = "b", processorId = "p1", confirms = List("c1", "c2"), sender = defaultTestChannel) - defaultTestChannel ! Deliver(persistent1, testActor) - defaultTestChannel ! Deliver(persistent2, testActor) + defaultTestChannel ! Deliver(persistent1, testActor.path) + defaultTestChannel ! 
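When replyPersistent = true, the channel answers the sender of a Deliver request with the stored Persistent message, which effectively is a write acknowledgement. A hypothetical upstream actor using it:

  import akka.actor._
  import akka.persistence._

  // assumes `channel` was created with PersistentChannelSettings(replyPersistent = true)
  class Upstream(channel: ActorRef, destination: ActorPath) extends Actor with ActorLogging {
    def receive = {
      case payload: String ⇒
        channel ! Deliver(Persistent(payload), destination)
      case Persistent(payload, sequenceNr) ⇒ // the channel stored the message
        log.info("stored {} as sequence number {}", payload, sequenceNr)
    }
  }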
Deliver(persistent2, testActor.path) - expectMsgPF() { case cp @ ConfirmablePersistentImpl("a", 13, "p1", _, _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() } - expectMsgPF() { case cp @ ConfirmablePersistentImpl("b", 2, "p1", _, _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() } + expectMsgPF() { case cp @ ConfirmablePersistentImpl("a", 13, "p1", _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() } + expectMsgPF() { case cp @ ConfirmablePersistentImpl("b", 2, "p1", _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() } } - } + "redeliver un-confirmed stored messages during recovery" in { + val confirmProbe = TestProbe() + val forwardProbe = TestProbe() - "A persistent channel" when { - "used standalone" must { - "redeliver un-confirmed stored messages during recovery" in { - val confirmProbe = TestProbe() - val forwardProbe = TestProbe() + subscribeToConfirmation(confirmProbe) - subscribeToConfirmation(confirmProbe) + val channel1 = createDefaultTestChannel("extra") + channel1 tell (Deliver(Persistent("a1"), forwardProbe.ref.path), null) + channel1 tell (Deliver(Persistent("a2"), forwardProbe.ref.path), null) - val channel1 = createDefaultTestChannel() - channel1 tell (Deliver(Persistent("a1"), forwardProbe.ref), null) - channel1 tell (Deliver(Persistent("a2"), forwardProbe.ref), null) + forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ /* no confirmation */ } + forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a2", _, _) ⇒ m.confirm() } - forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ /* no confirmation */ } - forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a2", _, _) ⇒ m.confirm() } + awaitConfirmation(confirmProbe) - awaitConfirmation(confirmProbe) + system.stop(channel1) - system.stop(channel1) + val channel2 = createDefaultTestChannel("extra") + channel2 tell (Deliver(Persistent("a3"), forwardProbe.ref.path), null) - val channel2 = createDefaultTestChannel() - channel2 tell (Deliver(Persistent("a3"), forwardProbe.ref), null) + forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ m.confirm() } + forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a3", _, _) ⇒ m.confirm() } - forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ m.confirm() } // sender still valid, no need to resolve - forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a3", _, _) ⇒ m.confirm() } + awaitConfirmation(confirmProbe) + awaitConfirmation(confirmProbe) - awaitConfirmation(confirmProbe) - awaitConfirmation(confirmProbe) + system.stop(channel2) + } + "not flood destinations" in { + val probe = TestProbe() + val settings = PersistentChannelSettings( + redeliverMax = 0, + redeliverInterval = 1.minute, + pendingConfirmationsMax = 4, + pendingConfirmationsMin = 2) - system.stop(channel2) - } + val channel = system.actorOf(PersistentChannel.props(s"${name}-watermark", settings)) + val destination = system.actorOf(Props(classOf[SlowDestination], probe.ref, settings.pendingConfirmationsMax)) + + 1 to 10 foreach { i ⇒ channel ! Deliver(Persistent(i), destination.path) } + 1 to 10 foreach { i ⇒ probe.expectMsg(i) } + + system.stop(channel) + } + "redeliver on reset" in { + val probe = TestProbe() + val settings = PersistentChannelSettings( + redeliverMax = 0, + redeliverInterval = 1.minute, + pendingConfirmationsMax = 4, + pendingConfirmationsMin = 2) + + val channel = system.actorOf(PersistentChannel.props(s"${name}-reset", settings)) + + 1 to 3 foreach { i ⇒ channel ! 
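The watermark pair used above is what keeps a persistent channel from flooding a slow destination: delivery is suspended once pendingConfirmationsMax deliveries are unconfirmed and resumed when the count drops back to pendingConfirmationsMin. The settings from the test, as one would write them in application code:

  import scala.concurrent.duration._
  import akka.persistence._

  // suspend delivery at 4 unconfirmed messages, resume at 2
  val settings = PersistentChannelSettings(
    redeliverMax = 0,
    redeliverInterval = 1.minute,
    pendingConfirmationsMax = 4,
    pendingConfirmationsMin = 2)
  // val channel = system.actorOf(PersistentChannel.props("my-channel", settings))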
Deliver(Persistent(i), probe.ref.path) } + 1 to 3 foreach { i ⇒ probe.expectMsgPF() { case ConfirmablePersistent(`i`, _, _) ⇒ } } + + channel ! Reset + + 1 to 3 foreach { i ⇒ probe.expectMsgPF() { case ConfirmablePersistent(`i`, _, _) ⇒ } } + + system.stop(channel) } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/ProcessorChannelSpec.scala b/akka-persistence/src/test/scala/akka/persistence/ProcessorChannelSpec.scala index 88116d6d21..ffeed8f9b9 100644 --- a/akka-persistence/src/test/scala/akka/persistence/ProcessorChannelSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/ProcessorChannelSpec.scala @@ -12,8 +12,6 @@ import com.typesafe.config._ import akka.actor._ import akka.testkit._ -import akka.persistence.JournalProtocol.Confirm - object ProcessorChannelSpec { class TestProcessor(name: String) extends NamedProcessor(name) { val destination = context.actorOf(Props[TestDestination]) @@ -23,10 +21,10 @@ object ProcessorChannelSpec { case m @ Persistent(s: String, _) if s.startsWith("a") ⇒ // forward to destination via channel, // destination replies to initial sender - channel forward Deliver(m.withPayload(s"fw: ${s}"), destination) + channel forward Deliver(m.withPayload(s"fw: ${s}"), destination.path) case m @ Persistent(s: String, _) if s.startsWith("b") ⇒ // reply to sender via channel - channel ! Deliver(m.withPayload(s"re: ${s}"), sender) + channel ! Deliver(m.withPayload(s"re: ${s}"), sender.path) } } @@ -40,7 +38,7 @@ object ProcessorChannelSpec { val channel = context.actorOf(Channel.props("channel", ChannelSettings(redeliverMax = 1, redeliverInterval = 100 milliseconds))) def receive = { - case p: Persistent ⇒ channel ! Deliver(p, destination) + case p: Persistent ⇒ channel ! Deliver(p, destination.path) case "replay" ⇒ throw new TestException("replay requested") } } @@ -52,7 +50,7 @@ object ProcessorChannelSpec { def handleEvent(event: String) = { events = event :: events - channel ! Deliver(Persistent(event), destination) + channel ! Deliver(Persistent(event), destination.path) } def receiveReplay: Receive = { @@ -83,10 +81,10 @@ abstract class ProcessorChannelSpec(config: Config) extends AkkaSpec(config) wit } def subscribeToConfirmation(probe: TestProbe): Unit = - system.eventStream.subscribe(probe.ref, classOf[Confirm]) + system.eventStream.subscribe(probe.ref, classOf[Delivered]) def awaitConfirmation(probe: TestProbe): Unit = - probe.expectMsgType[Confirm] + probe.expectMsgType[Delivered] def createTestProcessor(): ActorRef = system.actorOf(Props(classOf[TestProcessor], name)) diff --git a/akka-persistence/src/test/scala/akka/persistence/ProcessorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/ProcessorSpec.scala index 3d7bf1eaf4..aab5c5ead6 100644 --- a/akka-persistence/src/test/scala/akka/persistence/ProcessorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/ProcessorSpec.scala @@ -304,14 +304,14 @@ abstract class ProcessorSpec(config: Config) extends AkkaSpec(config) with Persi "support single message deletions" in { val deleteProbe = TestProbe() - system.eventStream.subscribe(deleteProbe.ref, classOf[Delete]) + system.eventStream.subscribe(deleteProbe.ref, classOf[DeleteMessages]) val processor1 = namedProcessor[DeleteMessageTestProcessor] processor1 ! Persistent("c") processor1 ! Persistent("d") processor1 ! Persistent("e") processor1 ! Delete1(4) - deleteProbe.expectMsgType[Delete] + deleteProbe.expectMsgType[DeleteMessages] val processor2 = namedProcessor[DeleteMessageTestProcessor] processor2 ! 
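As in the deletion test above, journal activity can be observed indirectly: with akka.persistence.publish-plugin-commands = on (see the PersistenceSpec config earlier in this diff), the journal publishes commands such as DeleteMessages and DeleteMessagesTo to the event stream. A self-contained sketch:

  import akka.actor.ActorSystem
  import akka.persistence._
  import akka.testkit.TestProbe

  // assumes `system` runs with akka.persistence.publish-plugin-commands = on
  def expectSingleDeletion(system: ActorSystem): Unit = {
    val probe = TestProbe()(system)
    system.eventStream.subscribe(probe.ref, classOf[DeleteMessages])
    // ... trigger a single-message deletion on a processor ...
    probe.expectMsgType[DeleteMessages]
  }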
GetState @@ -321,19 +321,29 @@ abstract class ProcessorSpec(config: Config) extends AkkaSpec(config) with Persi "support bulk message deletions" in { val deleteProbe = TestProbe() - system.eventStream.subscribe(deleteProbe.ref, classOf[Delete]) + system.eventStream.subscribe(deleteProbe.ref, classOf[DeleteMessagesTo]) val processor1 = namedProcessor[DeleteMessageTestProcessor] processor1 ! Persistent("c") processor1 ! Persistent("d") processor1 ! Persistent("e") processor1 ! DeleteN(4) - deleteProbe.expectMsgType[Delete] + deleteProbe.expectMsgType[DeleteMessagesTo] val processor2 = namedProcessor[DeleteMessageTestProcessor] processor2 ! GetState expectMsg(List("e-5")) + + processor2 ! Persistent("f") + processor2 ! Persistent("g") + processor2 ! DeleteN(6) + deleteProbe.expectMsgType[DeleteMessagesTo] + + val processor3 = namedProcessor[DeleteMessageTestProcessor] + processor3 ! GetState + + expectMsg(List("g-7")) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/ViewSpec.scala b/akka-persistence/src/test/scala/akka/persistence/ViewSpec.scala new file mode 100644 index 0000000000..82b625db0c --- /dev/null +++ b/akka-persistence/src/test/scala/akka/persistence/ViewSpec.scala @@ -0,0 +1,279 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package akka.persistence + +import scala.concurrent.duration._ + +import com.typesafe.config.Config + +import akka.actor._ +import akka.testkit._ + +object ViewSpec { + class TestProcessor(name: String, probe: ActorRef) extends NamedProcessor(name) { + def receive = { + case Persistent(payload, sequenceNr) ⇒ + probe ! s"${payload}-${sequenceNr}" + } + } + + class TestView(name: String, probe: ActorRef, interval: FiniteDuration, var failAt: Option[String]) extends View { + def this(name: String, probe: ActorRef, interval: FiniteDuration) = + this(name, probe, interval, None) + + def this(name: String, probe: ActorRef) = + this(name, probe, 100.milliseconds) + + override def autoUpdateInterval: FiniteDuration = interval.dilated(context.system) + override val processorId: String = name + + var last: String = _ + + def receive = { + case "get" ⇒ + probe ! last + case "boom" ⇒ + throw new TestException("boom") + case Persistent(payload, _) if Some(payload) == failAt ⇒ + throw new TestException("boom") + case Persistent(payload, sequenceNr) ⇒ + last = s"replicated-${payload}-${sequenceNr}" + probe ! last + } + + override def postRestart(reason: Throwable): Unit = { + super.postRestart(reason) + failAt = None + } + } + + class PassiveTestView(name: String, probe: ActorRef, var failAt: Option[String]) extends View { + override val processorId: String = name + + override def autoUpdate: Boolean = false + override def autoUpdateReplayMax: Long = 0L // no message replay during initial recovery + + var last: String = _ + + def receive = { + case "get" ⇒ + probe ! last + case Persistent(payload, _) if Some(payload) == failAt ⇒ + throw new TestException("boom") + case Persistent(payload, sequenceNr) ⇒ + last = s"replicated-${payload}-${sequenceNr}" + } + + override def postRestart(reason: Throwable): Unit = { + super.postRestart(reason) + failAt = None + } + } + + class TestDestination(probe: ActorRef) extends Actor { + def receive = { + case cp @ ConfirmablePersistent(payload, sequenceNr, _) ⇒ + cp.confirm() + probe ! 
s"${payload}-${sequenceNr}" + } + } + + class EmittingView(name: String, destination: ActorRef) extends View { + override def autoUpdateInterval: FiniteDuration = 100.milliseconds.dilated(context.system) + override val processorId: String = name + + val channel = context.actorOf(Channel.props(s"${name}-channel")) + + def receive = { + case "restart" ⇒ + throw new TestException("restart requested") + case Persistent(payload, sequenceNr) ⇒ + channel ! Deliver(Persistent(s"emitted-${payload}"), destination.path) + } + } + + class SnapshottingView(name: String, probe: ActorRef) extends View { + override def autoUpdateInterval: FiniteDuration = 100.microseconds.dilated(context.system) + override val processorId: String = name + override val viewId: String = s"${name}-replicator" + + var last: String = _ + + def receive = { + case "get" ⇒ + probe ! last + case "snap" ⇒ + saveSnapshot(last) + case "restart" ⇒ + throw new TestException("restart requested") + case SaveSnapshotSuccess(_) ⇒ + probe ! "snapped" + case SnapshotOffer(metadata, snapshot: String) ⇒ + last = snapshot + probe ! last + case Persistent(payload, sequenceNr) ⇒ + last = s"replicated-${payload}-${sequenceNr}" + probe ! last + } + } +} + +abstract class ViewSpec(config: Config) extends AkkaSpec(config) with PersistenceSpec with ImplicitSender { + import ViewSpec._ + + var processor: ActorRef = _ + var view: ActorRef = _ + + var processorProbe: TestProbe = _ + var viewProbe: TestProbe = _ + + override protected def beforeEach(): Unit = { + super.beforeEach() + + processorProbe = TestProbe() + viewProbe = TestProbe() + + processor = system.actorOf(Props(classOf[TestProcessor], name, processorProbe.ref)) + processor ! Persistent("a") + processor ! Persistent("b") + + processorProbe.expectMsg("a-1") + processorProbe.expectMsg("b-2") + } + + override protected def afterEach(): Unit = { + system.stop(processor) + system.stop(view) + super.afterEach() + } + + def subscribeToConfirmation(probe: TestProbe): Unit = + system.eventStream.subscribe(probe.ref, classOf[Delivered]) + + def awaitConfirmation(probe: TestProbe): Unit = + probe.expectMsgType[Delivered] + + "A view" must { + "receive past updates from a processor" in { + view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref)) + viewProbe.expectMsg("replicated-a-1") + viewProbe.expectMsg("replicated-b-2") + } + "receive live updates from a processor" in { + view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref)) + viewProbe.expectMsg("replicated-a-1") + viewProbe.expectMsg("replicated-b-2") + processor ! Persistent("c") + viewProbe.expectMsg("replicated-c-3") + } + "run updates at specified interval" in { + view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 2.seconds)) + // initial update is done on start + viewProbe.expectMsg("replicated-a-1") + viewProbe.expectMsg("replicated-b-2") + // live updates takes 5 seconds to replicate + processor ! Persistent("c") + viewProbe.expectNoMsg(1.second) + viewProbe.expectMsg("replicated-c-3") + } + "run updates on user request" in { + view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds)) + viewProbe.expectMsg("replicated-a-1") + viewProbe.expectMsg("replicated-b-2") + processor ! Persistent("c") + processorProbe.expectMsg("c-3") + view ! 
+    "run updates on user request" in {
+      view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds))
+      viewProbe.expectMsg("replicated-a-1")
+      viewProbe.expectMsg("replicated-b-2")
+      processor ! Persistent("c")
+      processorProbe.expectMsg("c-3")
+      view ! Update(await = false)
+      viewProbe.expectMsg("replicated-c-3")
+    }
+    "run updates on user request and await update" in {
+      view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds))
+      viewProbe.expectMsg("replicated-a-1")
+      viewProbe.expectMsg("replicated-b-2")
+      processor ! Persistent("c")
+      processorProbe.expectMsg("c-3")
+      view ! Update(await = true)
+      view ! "get"
+      viewProbe.expectMsg("replicated-c-3")
+    }
+    "run updates again on failure outside an update cycle" in {
+      view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds))
+      viewProbe.expectMsg("replicated-a-1")
+      viewProbe.expectMsg("replicated-b-2")
+      view ! "boom"
+      viewProbe.expectMsg("replicated-a-1")
+      viewProbe.expectMsg("replicated-b-2")
+    }
+    "run updates again on failure during an update cycle" in {
+      processor ! Persistent("c")
+      processorProbe.expectMsg("c-3")
+      view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds, Some("b")))
+      viewProbe.expectMsg("replicated-a-1")
+      viewProbe.expectMsg("replicated-a-1")
+      viewProbe.expectMsg("replicated-b-2")
+      viewProbe.expectMsg("replicated-c-3")
+    }
+    "run size-limited updates on user request" in {
+      processor ! Persistent("c")
+      processor ! Persistent("d")
+      processor ! Persistent("e")
+      processor ! Persistent("f")
+
+      processorProbe.expectMsg("c-3")
+      processorProbe.expectMsg("d-4")
+      processorProbe.expectMsg("e-5")
+      processorProbe.expectMsg("f-6")
+
+      view = system.actorOf(Props(classOf[PassiveTestView], name, viewProbe.ref, None))
+
+      view ! Update(await = true, replayMax = 2)
+      view ! "get"
+      viewProbe.expectMsg("replicated-b-2")
+
+      view ! Update(await = true, replayMax = 1)
+      view ! "get"
+      viewProbe.expectMsg("replicated-c-3")
+
+      view ! Update(await = true, replayMax = 4)
+      view ! "get"
+      viewProbe.expectMsg("replicated-f-6")
+    }
+  }
+
+  "A view" can {
+    "use channels" in {
+      val confirmProbe = TestProbe()
+      val destinationProbe = TestProbe()
+      val destination = system.actorOf(Props(classOf[TestDestination], destinationProbe.ref))
+
+      subscribeToConfirmation(confirmProbe)
+
+      view = system.actorOf(Props(classOf[EmittingView], name, destination))
+      destinationProbe.expectMsg("emitted-a-1")
+      destinationProbe.expectMsg("emitted-b-2")
+      awaitConfirmation(confirmProbe)
+      awaitConfirmation(confirmProbe)
+
+      view ! "restart"
+      processor ! Persistent("c")
+
+      destinationProbe.expectMsg("emitted-c-3")
+      awaitConfirmation(confirmProbe)
+    }
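Update is what drives the manual and size-limited refreshes tested above: await = true makes the view handle commands sent after the Update only once the triggered replay has completed, and replayMax bounds how many stored messages are applied. A sketch of driving a passive view by hand:

    import akka.actor._
    import akka.persistence._

    // Hypothetical view that is only updated on demand.
    class OnDemandView extends View {
      override val processorId: String = "some-processor-id"
      override def autoUpdate: Boolean = false

      def receive = {
        case Persistent(payload, _) => // apply payload to view state
      }
    }

    object ManualUpdate extends App {
      val system = ActorSystem("example")
      val view = system.actorOf(Props[OnDemandView])
      // replay at most 100 new messages; commands sent after this Update
      // are handled once the replay it triggers has completed
      view ! Update(await = true, replayMax = 100L)
    }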
Persistent("c") + viewProbe.expectMsg("replicated-b-2") + viewProbe.expectMsg("replicated-c-3") + } + } +} + +class LeveldbViewSpec extends ViewSpec(PersistenceSpec.config("leveldb", "LeveldbViewSpec")) +class InmemViewSpec extends ViewSpec(PersistenceSpec.config("inmem", "InmemViewSpec")) + diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala index 82c6e81572..c81563a074 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala @@ -4,7 +4,7 @@ package akka.persistence.journal.chaos -import scala.collection.immutable.Seq +import scala.collection.immutable import scala.concurrent.Future import scala.concurrent.forkjoin.ThreadLocalRandom @@ -15,11 +15,17 @@ import akka.persistence.journal.inmem.InmemMessages class WriteFailedException(ps: Seq[PersistentRepr]) extends TestException(s"write failed for payloads = [${ps.map(_.payload)}]") -class ReplayFailedException(ps: Seq[PersistentRepr]) - extends TestException(s"replay failed after payloads = [${ps.map(_.payload)}]") +class ConfirmFailedException(cs: Seq[PersistentConfirmation]) + extends TestException(s"write failed for confirmations = [${cs.map(c ⇒ s"${c.processorId}-${c.sequenceNr}-${c.channelId}")}]") -class DeleteFailedException(processorId: String, fromSequenceNr: Long, toSequenceNr: Long) - extends TestException(s"delete failed for processor id = [${processorId}], from sequence number = [${fromSequenceNr}], to sequence number = [${toSequenceNr}]") +class ReplayFailedException(ps: Seq[PersistentRepr]) + extends TestException(s"recovery failed after replaying payloads = [${ps.map(_.payload)}]") + +class ReadHighestFailedException + extends TestException(s"recovery failed when reading highest sequence number") + +class DeleteFailedException(messageIds: immutable.Seq[PersistentId]) + extends TestException(s"delete failed for message ids = [${messageIds}]") /** * Keep [[ChaosJournal]] state in an external singleton so that it survives journal restarts. 
diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala
index 82c6e81572..c81563a074 100644
--- a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala
@@ -4,7 +4,7 @@
 
 package akka.persistence.journal.chaos
 
-import scala.collection.immutable.Seq
+import scala.collection.immutable
 import scala.concurrent.Future
 import scala.concurrent.forkjoin.ThreadLocalRandom
@@ -15,11 +15,17 @@ import akka.persistence.journal.inmem.InmemMessages
 class WriteFailedException(ps: Seq[PersistentRepr])
   extends TestException(s"write failed for payloads = [${ps.map(_.payload)}]")
 
-class ReplayFailedException(ps: Seq[PersistentRepr])
-  extends TestException(s"replay failed after payloads = [${ps.map(_.payload)}]")
+class ConfirmFailedException(cs: Seq[PersistentConfirmation])
+  extends TestException(s"write failed for confirmations = [${cs.map(c ⇒ s"${c.processorId}-${c.sequenceNr}-${c.channelId}")}]")
 
-class DeleteFailedException(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)
-  extends TestException(s"delete failed for processor id = [${processorId}], from sequence number = [${fromSequenceNr}], to sequence number = [${toSequenceNr}]")
+class ReplayFailedException(ps: Seq[PersistentRepr])
+  extends TestException(s"recovery failed after replaying payloads = [${ps.map(_.payload)}]")
+
+class ReadHighestFailedException
+  extends TestException(s"recovery failed when reading highest sequence number")
+
+class DeleteFailedException(messageIds: immutable.Seq[PersistentId])
+  extends TestException(s"delete failed for message ids = [${messageIds}]")
 
 /**
  * Keep [[ChaosJournal]] state in an external singleton so that it survives journal restarts.
@@ -32,33 +38,44 @@ class ChaosJournal extends SyncWriteJournal {
   val config = context.system.settings.config.getConfig("akka.persistence.journal.chaos")
   val writeFailureRate = config.getDouble("write-failure-rate")
+  val confirmFailureRate = config.getDouble("confirm-failure-rate")
   val deleteFailureRate = config.getDouble("delete-failure-rate")
   val replayFailureRate = config.getDouble("replay-failure-rate")
+  val readHighestFailureRate = config.getDouble("read-highest-failure-rate")
 
   def random = ThreadLocalRandom.current
 
-  def write(persistentBatch: Seq[PersistentRepr]): Unit =
-    if (shouldFail(writeFailureRate)) throw new WriteFailedException(persistentBatch)
-    else persistentBatch.foreach(add)
+  def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit =
+    if (shouldFail(writeFailureRate)) throw new WriteFailedException(messages)
+    else messages.foreach(add)
 
-  def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Unit =
-    if (shouldFail(deleteFailureRate)) throw new DeleteFailedException(processorId, fromSequenceNr, toSequenceNr)
-    else fromSequenceNr to toSequenceNr foreach { snr ⇒ if (permanent) del(processorId, snr) else update(processorId, snr)(_.update(deleted = true)) }
+  def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit =
+    if (shouldFail(confirmFailureRate)) throw new ConfirmFailedException(confirmations)
+    else confirmations.foreach(cnf ⇒ update(cnf.processorId, cnf.sequenceNr)(p ⇒ p.update(confirms = cnf.channelId +: p.confirms)))
 
-  def confirm(processorId: String, sequenceNr: Long, channelId: String): Unit =
-    update(processorId, sequenceNr)(p ⇒ p.update(confirms = channelId +: p.confirms))
+  def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit =
+    if (shouldFail(deleteFailureRate)) throw new DeleteFailedException(messageIds)
+    else if (permanent) messageIds.foreach(mid ⇒ del(mid.processorId, mid.sequenceNr))
+    else messageIds.foreach(mid ⇒ update(mid.processorId, mid.sequenceNr)(_.update(deleted = true)))
 
-  def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Long] =
+  def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit =
+    deleteMessages((1L to toSequenceNr).map(PersistentIdImpl(processorId, _)), permanent)
+
+  def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Unit] =
     if (shouldFail(replayFailureRate)) {
-      val rm = read(processorId, fromSequenceNr, toSequenceNr)
+      val rm = read(processorId, fromSequenceNr, toSequenceNr, max)
       val sm = rm.take(random.nextInt(rm.length + 1))
       sm.foreach(replayCallback)
       Future.failed(new ReplayFailedException(sm))
     } else {
-      read(processorId, fromSequenceNr, toSequenceNr).foreach(replayCallback)
-      Future.successful(maxSequenceNr(processorId))
+      read(processorId, fromSequenceNr, toSequenceNr, max).foreach(replayCallback)
+      Future.successful(())
     }
 
+  def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
+    if (shouldFail(readHighestFailureRate)) Future.failed(new ReadHighestFailedException)
+    else Future.successful(highestSequenceNr(processorId))
+
   def shouldFail(rate: Double): Boolean = random.nextDouble() < rate
 }
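ChaosJournal illustrates the reshaped synchronous journal SPI: writeMessages/writeConfirmations, id-based and range-based deletion, and a replay that no longer returns the highest sequence number (that moved to asyncReadHighestSequenceNr). A skeleton against the same SPI, with a plain Map as a non-durable stand-in for a real store:

    import scala.collection.immutable
    import scala.concurrent.Future

    import akka.persistence._
    import akka.persistence.journal.SyncWriteJournal

    // Sketch only: shows the SPI shape, not a production journal.
    class MapJournal extends SyncWriteJournal {
      private var store = Map.empty[(String, Long), PersistentRepr]

      def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit =
        messages.foreach(m => store += ((m.processorId, m.sequenceNr) -> m))

      def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit =
        confirmations.foreach { c =>
          store.get((c.processorId, c.sequenceNr)).foreach(p =>
            store += ((c.processorId, c.sequenceNr) -> p.update(confirms = c.channelId +: p.confirms)))
        }

      def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit =
        messageIds.foreach { id =>
          if (permanent) store -= ((id.processorId, id.sequenceNr))
          else store.get((id.processorId, id.sequenceNr)).foreach(p =>
            store += ((id.processorId, id.sequenceNr) -> p.update(deleted = true)))
        }

      def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit =
        deleteMessages((1L to toSequenceNr).map(PersistentIdImpl(processorId, _)), permanent)

      def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr => Unit): Future[Unit] = {
        val messages = store.values.toSeq
          .filter(p => p.processorId == processorId && p.sequenceNr >= fromSequenceNr && p.sequenceNr <= toSequenceNr)
          .sortBy(_.sequenceNr)
        messages.take(math.min(max, messages.size.toLong).toInt).foreach(replayCallback)
        Future.successful(())
      }

      def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
        Future.successful((0L +: store.keys.collect { case (`processorId`, snr) => snr }.toSeq).max)
    }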
diff --git a/akka-persistence/src/test/scala/akka/persistence/serialization/SerializerSpec.scala b/akka-persistence/src/test/scala/akka/persistence/serialization/SerializerSpec.scala
index 907d531a61..5ec13c700b 100644
--- a/akka-persistence/src/test/scala/akka/persistence/serialization/SerializerSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/serialization/SerializerSpec.scala
@@ -5,17 +5,21 @@
 package akka.persistence.serialization
 
 import scala.collection.immutable
+import scala.concurrent._
+import scala.concurrent.duration._
+import scala.util._
 
 import com.typesafe.config._
 
 import akka.actor._
+import akka.pattern.ask
 import akka.persistence._
-import akka.persistence.JournalProtocol.Confirm
 import akka.serialization._
 import akka.testkit._
+import akka.util.Timeout
 
 object SerializerSpecConfigs {
-  val customSerializers =
+  val customSerializers = ConfigFactory.parseString(
     """
       akka.actor {
         serializers {
@@ -27,9 +31,9 @@ object SerializerSpecConfigs {
           "akka.persistence.serialization.MySnapshot" = my-snapshot
         }
       }
-    """
+    """)
 
-  val remoteCommon =
+  val remote = ConfigFactory.parseString(
     """
       akka {
         actor {
@@ -37,16 +41,16 @@ object SerializerSpecConfigs {
         }
         remote {
           enabled-transports = ["akka.remote.netty.tcp"]
-          netty.tcp.hostname = "127.0.0.1"
+          netty.tcp {
+            hostname = "127.0.0.1"
+            port = 0
+          }
         }
         loglevel = ERROR
         log-dead-letters = 0
         log-dead-letters-during-shutdown = off
       }
-    """
-
-  val systemA = "akka.remote.netty.tcp.port = 0"
-  val systemB = "akka.remote.netty.tcp.port = 0"
+    """)
 
   def config(configs: String*): Config =
     configs.foldLeft(ConfigFactory.empty)((r, c) ⇒ r.withFallback(ConfigFactory.parseString(c)))
@@ -54,7 +58,7 @@ object SerializerSpecConfigs {
 
 import SerializerSpecConfigs._
 
-class SnapshotSerializerPersistenceSpec extends AkkaSpec(config(customSerializers)) {
+class SnapshotSerializerPersistenceSpec extends AkkaSpec(customSerializers) {
   val serialization = SerializationExtension(system)
 
   "A snapshot serializer" must {
@@ -70,13 +74,13 @@ class SnapshotSerializerPersistenceSpec extends AkkaSpec(config(customSerializer
   }
 }
 
-class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers)) {
+class MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) {
   val serialization = SerializationExtension(system)
 
   "A message serializer" when {
     "not given a manifest" must {
       "handle custom ConfirmablePersistent message serialization" in {
-        val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, true, 3, List("c1", "c2"), confirmable = true, Confirm("p2", 14, "c2"), testActor, testActor)
+        val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, 3, List("c1", "c2"), confirmable = true, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
         val serializer = serialization.findSerializerFor(persistent)
 
         val bytes = serializer.toBinary(persistent)
@@ -85,7 +89,7 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
         deserialized should be(persistent.withPayload(MyPayload(".a.")))
       }
       "handle custom Persistent message serialization" in {
-        val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, true, 0, List("c1", "c2"), confirmable = false, Confirm("p2", 14, "c2"), testActor, testActor)
+        val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, 0, List("c1", "c2"), confirmable = false, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
        val serializer = serialization.findSerializerFor(persistent)
 
         val bytes = serializer.toBinary(persistent)
@@ -96,7 +100,7 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
       }
     }
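The specs rely on the customSerializers config to bind the test payload and snapshot classes to custom serializers. The same round trip in miniature, with hypothetical docs.MyPayload / docs.MyPayloadSerializer class names standing in for the spec's fixtures:

    import akka.actor.ActorSystem
    import akka.serialization.SerializationExtension
    import com.typesafe.config.ConfigFactory

    object SerializationRoundTrip extends App {
      val system = ActorSystem("example", ConfigFactory.parseString(
        """
          akka.actor {
            serializers {
              my-payload = "docs.MyPayloadSerializer"   // hypothetical serializer class
            }
            serialization-bindings {
              "docs.MyPayload" = my-payload             // hypothetical payload class
            }
          }
        """))

      val serialization = SerializationExtension(system)
      val payload = new docs.MyPayload("a")             // hypothetical payload instance
      val serializer = serialization.findSerializerFor(payload)
      val restored = serializer.fromBinary(serializer.toBinary(payload))
      println(restored)

      system.shutdown()
    }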
     "given a PersistentRepr manifest" must {
       "handle custom ConfirmablePersistent message serialization" in {
-        val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, true, 3, List("c1", "c2"), confirmable = true, Confirm("p2", 14, "c2"), testActor, testActor)
+        val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, 3, List("c1", "c2"), confirmable = true, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
         val serializer = serialization.findSerializerFor(persistent)
 
         val bytes = serializer.toBinary(persistent)
@@ -105,7 +109,7 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
         deserialized should be(persistent.withPayload(MyPayload(".b.")))
       }
       "handle custom Persistent message serialization" in {
-        val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, true, 3, List("c1", "c2"), confirmable = true, Confirm("p2", 14, "c2"), testActor, testActor)
+        val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, 3, List("c1", "c2"), confirmable = true, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
         val serializer = serialization.findSerializerFor(persistent)
 
         val bytes = serializer.toBinary(persistent)
@@ -115,12 +119,21 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
       }
     }
     "given a Confirm manifest" must {
-      "handle Confirm message serialization" in {
-        val confirmation = Confirm("x", 2, "y")
+      "handle DeliveredByChannel message serialization" in {
+        val confirmation = DeliveredByChannel("p2", "c2", 14)
         val serializer = serialization.findSerializerFor(confirmation)
 
         val bytes = serializer.toBinary(confirmation)
-        val deserialized = serializer.fromBinary(bytes, Some(classOf[Confirm]))
+        val deserialized = serializer.fromBinary(bytes, Some(classOf[DeliveredByChannel]))
+
+        deserialized should be(confirmation)
+      }
+      "handle DeliveredByPersistentChannel message serialization" in {
+        val confirmation = DeliveredByPersistentChannel("c2", 14)
+        val serializer = serialization.findSerializerFor(confirmation)
+
+        val bytes = serializer.toBinary(confirmation)
+        val deserialized = serializer.fromBinary(bytes, Some(classOf[DeliveredByPersistentChannel]))
 
         deserialized should be(confirmation)
       }
@@ -140,19 +153,27 @@ object MessageSerializerRemotingSpec {
       case PersistentBatch(Persistent(MyPayload(data), _) +: tail) ⇒ sender ! s"b${data}"
       case ConfirmablePersistent(MyPayload(data), _, _)            ⇒ sender ! s"c${data}"
       case Persistent(MyPayload(data), _)                          ⇒ sender ! s"p${data}"
-      case p @ Confirm(pid, msnr, cid, wsnr, ep)                   ⇒ sender ! s"${pid},${msnr},${cid},${wsnr},${ep.path.name.startsWith("testActor")}"
+      case DeliveredByChannel(pid, cid, msnr, dsnr, ep)            ⇒ sender ! s"${pid},${cid},${msnr},${dsnr},${ep.path.name.startsWith("testActor")}"
+      case DeliveredByPersistentChannel(cid, msnr, dsnr, ep)       ⇒ sender ! s"${cid},${msnr},${dsnr},${ep.path.name.startsWith("testActor")}"
+      case Deliver(Persistent(payload, _), dp)                     ⇒ context.actorSelection(dp) ! payload
     }
   }
 
   def port(system: ActorSystem) =
-    system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress.port.get
+    address(system).port.get
+
+  def address(system: ActorSystem) =
+    system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
 }
 
-class MessageSerializerRemotingSpec extends AkkaSpec(config(systemA).withFallback(config(customSerializers, remoteCommon))) with ImplicitSender {
-  import MessageSerializerRemotingSpec._
+class MessageSerializerRemotingSpec extends AkkaSpec(remote.withFallback(customSerializers)) with ImplicitSender {
+  implicit val timeout = Timeout(5.seconds)
 
-  val remoteSystem = ActorSystem("remote", config(systemB).withFallback(config(customSerializers, remoteCommon)))
-  val localActor = system.actorOf(Props(classOf[LocalActor], port(remoteSystem)))
+  import MessageSerializerRemotingSpec._
+  import system.dispatcher
+
+  val remoteSystem = ActorSystem("remote", remote.withFallback(customSerializers))
+  val localActor = system.actorOf(Props(classOf[LocalActor], port(remoteSystem)), "local")
 
   override protected def atStartup() {
     remoteSystem.actorOf(Props[RemoteActor], "remote")
@@ -176,9 +197,17 @@ class MessageSerializerRemotingSpec extends AkkaSpec(config(systemA).withFallbac
       localActor ! PersistentBatch(immutable.Seq(Persistent(MyPayload("a"))))
       expectMsg("b.a.")
     }
-    "serialize Confirm messages during remoting" in {
-      localActor ! Confirm("a", 2, "b", 3, testActor)
-      expectMsg("a,2,b,3,true")
+    "serialize DeliveredByChannel messages during remoting" in {
+      localActor ! DeliveredByChannel("a", "b", 2, 3, testActor)
+      expectMsg("a,b,2,3,true")
+    }
+    "serialize DeliveredByPersistentChannel messages during remoting" in {
+      localActor ! DeliveredByPersistentChannel("c", 2, 3, testActor)
+      expectMsg("c,2,3,true")
+    }
+    "serialize Deliver messages during remoting" in {
+      localActor ! Deliver(Persistent("a"), ActorPath.fromString(testActor.path.toStringWithAddress(address(system))))
+      expectMsg("a")
     }
   }
 }
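Deliver now carries an ActorPath rather than an ActorRef, so a destination on another system has to be addressed by a fully qualified path, which is what toStringWithAddress provides in the last test. The same recipe extracted as a small helper:

    import akka.actor._

    object RemotePaths {
      // Fully qualifies ref's path with the given system's default remote
      // address, so the resulting ActorPath is resolvable from other systems.
      def remoteDestination(system: ActorSystem, ref: ActorRef): ActorPath = {
        val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
        ActorPath.fromString(ref.path.toStringWithAddress(address))
      }
    }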
diff --git a/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ProcessorChannelExample.java b/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ProcessorChannelExample.java
index 8bcfc75679..3a0d42ef6e 100644
--- a/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ProcessorChannelExample.java
+++ b/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ProcessorChannelExample.java
@@ -22,7 +22,9 @@ public class ProcessorChannelExample {
             if (message instanceof Persistent) {
                 Persistent msg = (Persistent)message;
                 System.out.println("processed " + msg.payload());
-                channel.tell(Deliver.create(msg.withPayload("processed " + msg.payload()), destination), getSelf());
+                channel.tell(Deliver.create(msg.withPayload("processed " + msg.payload()), destination.path()), getSelf());
+            } else if (message instanceof String) {
+                System.out.println("reply = " + message);
             }
         }
     }
@@ -32,8 +34,9 @@ public class ProcessorChannelExample {
         public void onReceive(Object message) throws Exception {
             if (message instanceof ConfirmablePersistent) {
                 ConfirmablePersistent msg = (ConfirmablePersistent)message;
-                msg.confirm();
                 System.out.println("received " + msg.payload());
+                getSender().tell(String.format("re: %s (%d)", msg.payload(), msg.sequenceNr()), null);
+                msg.confirm();
             }
         }
     }
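Note the reordering in the destination above: the reply is sent before confirm() is called. Confirming last means a crash in between leads to redelivery instead of a silently lost side effect. The same pattern in Scala:

    import akka.actor._
    import akka.persistence._

    class ReplyingDestination extends Actor {
      def receive = {
        case cp @ ConfirmablePersistent(payload, sequenceNr, _) =>
          // side effect first ...
          sender ! s"re: ${payload} (${sequenceNr})"
          // ... confirm last, so an unconfirmed message is redelivered
          cp.confirm()
      }
    }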
diff --git a/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ViewExample.java b/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ViewExample.java
new file mode 100644
index 0000000000..b1f886845a
--- /dev/null
+++ b/akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/ViewExample.java
@@ -0,0 +1,90 @@
+package sample.persistence.japi;
+
+import java.util.Scanner;
+
+import akka.actor.*;
+import akka.persistence.*;
+
+public class ViewExample {
+    public static class ExampleProcessor extends UntypedProcessor {
+        @Override
+        public String processorId() {
+            return "processor-5";
+        }
+
+        @Override
+        public void onReceive(Object message) throws Exception {
+            if (message instanceof Persistent) {
+                Persistent p = (Persistent)message;
+                System.out.println(String.format("processor received %s (sequence nr = %d)", p.payload(), p.sequenceNr()));
+            }
+        }
+    }
+
+    public static class ExampleView extends UntypedView {
+        private final ActorRef destination = getContext().actorOf(Props.create(ExampleDestination.class));
+        private final ActorRef channel = getContext().actorOf(Channel.props("channel"));
+
+        private int numReplicated = 0;
+
+        @Override
+        public String viewId() {
+            return "view-5";
+        }
+
+        @Override
+        public String processorId() {
+            return "processor-5";
+        }
+
+        @Override
+        public void onReceive(Object message) throws Exception {
+            if (message instanceof Persistent) {
+                Persistent p = (Persistent)message;
+                numReplicated += 1;
+                System.out.println(String.format("view received %s (sequence nr = %d, num replicated = %d)", p.payload(), p.sequenceNr(), numReplicated));
+                channel.tell(Deliver.create(p.withPayload("replicated-" + p.payload()), destination.path()), getSelf());
+            } else if (message instanceof SnapshotOffer) {
+                SnapshotOffer so = (SnapshotOffer)message;
+                numReplicated = (Integer)so.snapshot();
+                System.out.println(String.format("view received snapshot offer %s (metadata = %s)", numReplicated, so.metadata()));
+            } else if (message.equals("snap")) {
+                saveSnapshot(numReplicated);
+            }
+        }
+    }
+
+    public static class ExampleDestination extends UntypedActor {
+        @Override
+        public void onReceive(Object message) throws Exception {
+            if (message instanceof ConfirmablePersistent) {
+                ConfirmablePersistent cp = (ConfirmablePersistent)message;
+                System.out.println(String.format("destination received %s (sequence nr = %s)", cp.payload(), cp.sequenceNr()));
+                cp.confirm();
+            }
+        }
+    }
+
+    public static void main(String... args) throws Exception {
+        final ActorSystem system = ActorSystem.create("example");
+        final ActorRef processor = system.actorOf(Props.create(ExampleProcessor.class));
+        final ActorRef view = system.actorOf(Props.create(ExampleView.class));
+
+        Scanner scanner = new Scanner(System.in);
+
+        while (scanner.hasNextLine()) {
+            String line = scanner.nextLine();
+            if (line.equals("exit")) {
+                break;
+            } else if (line.equals("sync")) {
+                view.tell(Update.create(false), null);
+            } else if (line.equals("snap")) {
+                view.tell("snap", null);
+            } else {
+                processor.tell(Persistent.create(line), null);
+            }
+        }
+
+        system.shutdown();
+    }
+}
diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala
index b7d6090844..adf78a8ae5 100644
--- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala
+++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala
@@ -21,8 +21,8 @@ object ConversationRecoveryExample extends App {
         println(s"received ping ${counter} times ...")
         m.confirm()
         if (!recoveryRunning) Thread.sleep(1000)
-        pongChannel ! Deliver(m.withPayload(Pong), sender, Resolve.Destination)
-      case "init" => if (counter == 0) pongChannel ! Deliver(Persistent(Pong), sender)
+        pongChannel ! Deliver(m.withPayload(Pong), sender.path)
+      case "init" => if (counter == 0) pongChannel ! Deliver(Persistent(Pong), sender.path)
     }
 
   override def preStart() = ()
@@ -38,7 +38,7 @@ object ConversationRecoveryExample extends App {
         println(s"received pong ${counter} times ...")
         m.confirm()
         if (!recoveryRunning) Thread.sleep(1000)
-        pingChannel ! Deliver(m.withPayload(Ping), sender, Resolve.Destination)
+        pingChannel ! Deliver(m.withPayload(Ping), sender.path)
     }
 
   override def preStart() = ()
diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala
index 1292d39f17..4c1ce47806 100644
--- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala
+++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala
@@ -5,20 +5,19 @@
 package sample.persistence
 
 import akka.actor._
-import akka.pattern.ask
 import akka.persistence._
-import akka.util.Timeout
 
 object ProcessorChannelExample extends App {
   class ExampleProcessor extends Processor {
     val channel = context.actorOf(Channel.props, "channel")
     val destination = context.actorOf(Props[ExampleDestination])
-    var received: List[Persistent] = Nil
 
     def receive = {
      case p @ Persistent(payload, _) =>
         println(s"processed ${payload}")
-        channel forward Deliver(p.withPayload(s"processed ${payload}"), destination)
+        channel ! Deliver(p.withPayload(s"processed ${payload}"), destination.path)
+      case reply: String =>
+        println(s"reply = ${reply}")
     }
   }
@@ -34,11 +33,8 @@ object ProcessorChannelExample extends App {
   val system = ActorSystem("example")
   val processor = system.actorOf(Props(classOf[ExampleProcessor]), "processor-1")
 
-  implicit val timeout = Timeout(3000)
-  import system.dispatcher
-
-  processor ? Persistent("a") onSuccess { case reply => println(s"reply = ${reply}") }
-  processor ? Persistent("b") onSuccess { case reply => println(s"reply = ${reply}") }
+  processor ! Persistent("a")
+  processor ! Persistent("b")
 
   Thread.sleep(1000)
   system.shutdown()
diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelRemoteExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelRemoteExample.scala
new file mode 100644
index 0000000000..81c0f93bea
--- /dev/null
+++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelRemoteExample.scala
@@ -0,0 +1,105 @@
+/**
+ * Copyright (C) 2009-2013 Typesafe Inc.
+ */
+
+package sample.persistence
+
+import scala.concurrent.duration._
+
+import com.typesafe.config._
+
+import akka.actor._
+import akka.persistence._
+
+object ProcessorChannelRemoteExample {
+  val config = ConfigFactory.parseString(
+    """
+      akka {
+        actor {
+          provider = "akka.remote.RemoteActorRefProvider"
+        }
+        remote {
+          enabled-transports = ["akka.remote.netty.tcp"]
+          netty.tcp.hostname = "127.0.0.1"
+        }
+        persistence {
+          journal.leveldb.dir = "target/example/journal"
+          snapshot-store.local.dir = "target/example/snapshots"
+        }
+        loglevel = INFO
+        log-dead-letters = 0
+        log-dead-letters-during-shutdown = off
+
+      }
+    """)
+}
+
+object SenderApp extends App {
+  import ProcessorChannelRemoteExample._
+
+  class ExampleProcessor(destination: ActorPath) extends Processor {
+    val listener = context.actorOf(Props[ExampleListener])
+    val channel = context.actorOf(Channel.props(ChannelSettings(
+      redeliverMax = 5,
+      redeliverInterval = 1.second,
+      redeliverFailureListener = Some(listener))), "channel")
+
+    def receive = {
+      case p @ Persistent(payload, _) =>
+        println(s"[processor] received payload: ${payload} (replayed = ${recoveryRunning})")
+        channel ! Deliver(p.withPayload(s"processed ${payload}"), destination)
+      case "restart" =>
+        throw new Exception("restart requested")
+      case reply: String =>
+        println(s"[processor] received reply: ${reply}")
+    }
+  }
+
+  class ExampleListener extends Actor {
+    def receive = {
+      case RedeliverFailure(messages) =>
+        println(s"unable to deliver ${messages.length} messages, restarting processor to resend messages ...")
+        context.parent ! "restart"
+    }
+  }
+
+  val receiverPath = ActorPath.fromString("akka.tcp://receiver@127.0.0.1:44317/user/receiver")
+  val senderConfig = ConfigFactory.parseString("""
+    akka.persistence.journal.leveldb.dir = "target/example/journal"
+    akka.persistence.snapshot-store.local.dir = "target/example/snapshots"
+    akka.remote.netty.tcp.port = 44316
+  """)
+
+  val system = ActorSystem("sender", config.withFallback(senderConfig))
+  val sender = system.actorOf(Props(classOf[ExampleProcessor], receiverPath))
+
+  @annotation.tailrec
+  def read(line: String): Unit = line match {
+    case "exit" | null =>
+    case msg =>
+      sender ! Persistent(msg)
+      read(Console.readLine())
+  }
+
+  read(Console.readLine())
+  system.shutdown()
+
+}
+
+object ReceiverApp extends App {
+  import ProcessorChannelRemoteExample._
+
+  class ExampleDestination extends Actor {
+    def receive = {
+      case p @ ConfirmablePersistent(payload, snr, _) =>
+        println(s"[destination] received payload: ${payload}")
+        sender ! s"re: ${payload} (snr = ${snr})"
+        p.confirm()
+    }
+  }
+
+  val receiverConfig = ConfigFactory.parseString("akka.remote.netty.tcp.port = 44317")
+  val system = ActorSystem("receiver", config.withFallback(receiverConfig))
+
+  system.actorOf(Props[ExampleDestination], "receiver")
+}
\ No newline at end of file
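SenderApp wires redelivery and failure handling together: a bounded number of redeliveries, then a RedeliverFailure notification that restarts the processor so unconfirmed messages are written and delivered again. The settings in isolation, condensed from the sample above:

    import scala.concurrent.duration._
    import akka.actor._
    import akka.persistence._

    object ChannelFactory {
      // Channel that retries five times at 1-second intervals and then
      // notifies myListener, which is assumed to handle RedeliverFailure.
      def channelProps(myListener: ActorRef): Props =
        Channel.props(ChannelSettings(
          redeliverMax = 5,
          redeliverInterval = 1.second,
          redeliverFailureListener = Some(myListener)))
    }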
diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ViewExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ViewExample.scala
new file mode 100644
index 0000000000..6616e1ed61
--- /dev/null
+++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ViewExample.scala
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) 2009-2013 Typesafe Inc.
+ */
+
+package sample.persistence
+
+import akka.actor._
+import akka.persistence._
+
+object ViewExample extends App {
+  class ExampleProcessor extends Processor {
+    override def processorId = "processor-5"
+
+    def receive = {
+      case Persistent(payload, sequenceNr) =>
+        println(s"processor received ${payload} (sequence nr = ${sequenceNr})")
+    }
+  }
+
+  class ExampleView extends View {
+    private var numReplicated = 0
+
+    override def processorId = "processor-5"
+    override def viewId = "view-5"
+
+    private val destination = context.actorOf(Props[ExampleDestination])
+    private val channel = context.actorOf(Channel.props("channel"))
+
+    def receive = {
+      case "snap" =>
+        saveSnapshot(numReplicated)
+      case SnapshotOffer(metadata, snapshot: Int) =>
+        numReplicated = snapshot
+        println(s"view received snapshot offer ${snapshot} (metadata = ${metadata})")
+      case Persistent(payload, sequenceNr) =>
+        numReplicated += 1
+        println(s"view received ${payload} (sequence nr = ${sequenceNr}, num replicated = ${numReplicated})")
+        channel ! Deliver(Persistent(s"replicated-${payload}"), destination.path)
+    }
+  }
+
+  class ExampleDestination extends Actor {
+    def receive = {
+      case cp @ ConfirmablePersistent(payload, sequenceNr, _) =>
+        println(s"destination received ${payload} (sequence nr = ${sequenceNr})")
+        cp.confirm()
+    }
+  }
+
+  val system = ActorSystem("example")
+
+  val processor = system.actorOf(Props(classOf[ExampleProcessor]))
+  val view = system.actorOf(Props(classOf[ExampleView]))
+
+  @annotation.tailrec
+  def read(line: String): Unit = line match {
+    case "exit" | null =>
+    case "sync" =>
+      view ! Update(await = false)
+      read(Console.readLine())
+    case "snap" =>
+      view ! "snap"
+      read(Console.readLine())
+    case msg =>
+      processor ! Persistent(msg)
+      read(Console.readLine())
+  }
+
+  read(Console.readLine())
+  system.shutdown()
+}