Merge pull request #18051 from akka/wip-16541-persistence-query
+per #16541 initial version of the Persistence Query module
commit 5a6ee8bd49
30 changed files with 2063 additions and 11 deletions
akka-docs/rst/java/code/docs/persistence/PersistenceQueryDocTest.java
@ -0,0 +1,376 @@
/**
 * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
 */

package docs.persistence;

import static akka.pattern.Patterns.ask;

import akka.actor.*;
import akka.dispatch.Mapper;
import akka.japi.pf.ReceiveBuilder;
import akka.persistence.*;
import akka.persistence.query.*;
import akka.persistence.query.javadsl.ReadJournal;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;
import akka.util.Timeout;
import docs.persistence.query.MyEventsByTagPublisher;
import org.reactivestreams.Subscriber;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
import scala.runtime.BoxedUnit;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class PersistenceQueryDocTest {

  final ActorSystem system = ActorSystem.create();
  final ActorMaterializer mat = ActorMaterializer.create(system);

  //#my-read-journal
  class MyReadJournal implements ReadJournal {
    private final ExtendedActorSystem system;

    public MyReadJournal(ExtendedActorSystem system) {
      this.system = system;
    }

    final FiniteDuration defaultRefreshInterval = FiniteDuration.create(3, TimeUnit.SECONDS);

    @SuppressWarnings("unchecked")
    public <T, M> Source<T, M> query(Query<T, M> q, Hint... hints) {
      if (q instanceof EventsByTag) {
        final EventsByTag eventsByTag = (EventsByTag) q;
        final String tag = eventsByTag.tag();
        long offset = eventsByTag.offset();

        final Props props = MyEventsByTagPublisher.props(tag, offset, refreshInterval(hints));

        return (Source<T, M>) Source.<EventEnvelope>actorPublisher(props)
          .mapMaterializedValue(noMaterializedValue());
      } else {
        // unsupported
        return Source.<T>failed(
          new UnsupportedOperationException(
            "Query " + q + " not supported by " + getClass().getName()))
          .mapMaterializedValue(noMaterializedValue());
      }
    }
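
    // picks the RefreshInterval hint if the caller provided one,
    // otherwise falls back to this journal's default polling interval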
    private FiniteDuration refreshInterval(Hint[] hints) {
      for (Hint hint : hints)
        if (hint instanceof RefreshInterval)
          return ((RefreshInterval) hint).interval();

      return defaultRefreshInterval;
    }
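
    // erases the materialized value type; this sample journal
    // does not provide a meaningful materialized value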
    private <I, M> akka.japi.function.Function<I, M> noMaterializedValue() {
      return param -> (M) null;
    }
  }
  //#my-read-journal

  void demonstrateBasicUsage() {
    final ActorSystem system = ActorSystem.create();

    //#basic-usage
    // obtain read journal by plugin id
    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    // issue query to journal
    Source<Object, BoxedUnit> source =
      readJournal.query(EventsByPersistenceId.create("user-1337", 0, Long.MAX_VALUE));

    // materialize stream, consuming events
    ActorMaterializer mat = ActorMaterializer.create(system);
    source.runForeach(event -> System.out.println("Event: " + event), mat);
    //#basic-usage
  }

  void demonstrateAllPersistenceIdsLive() {
    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#all-persistence-ids-live
    readJournal.query(AllPersistenceIds.getInstance());
    //#all-persistence-ids-live
  }

  void demonstrateNoRefresh() {
    final ActorSystem system = ActorSystem.create();

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#all-persistence-ids-snap
    readJournal.query(AllPersistenceIds.getInstance(), NoRefresh.getInstance());
    //#all-persistence-ids-snap
  }

  void demonstrateRefresh() {
    final ActorSystem system = ActorSystem.create();

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#events-by-persistent-id-refresh
    final RefreshInterval refresh = RefreshInterval.create(1, TimeUnit.SECONDS);
    readJournal.query(EventsByPersistenceId.create("user-us-1337"), refresh);
    //#events-by-persistent-id-refresh
  }

  void demonstrateEventsByTag() {
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer mat = ActorMaterializer.create(system);

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#events-by-tag
    // assuming journal is able to work with numeric offsets we can:
    final Source<EventEnvelope, BoxedUnit> blueThings =
      readJournal.query(EventsByTag.create("blue"));

    // find top 10 blue things:
    final Future<List<Object>> top10BlueThings =
      (Future<List<Object>>) blueThings
        .map(t -> t.event())
        .take(10) // cancels the query stream after pulling 10 elements
        .<List<Object>>runFold(new ArrayList<>(10), (acc, e) -> {
          acc.add(e);
          return acc;
        }, mat);

    // start another query, from the known offset
    Source<EventEnvelope, BoxedUnit> blue = readJournal.query(EventsByTag.create("blue", 10));
    //#events-by-tag
  }
  //#materialized-query-metadata-classes
  // a plugin can provide:

  //#materialized-query-metadata-classes

  static
  //#materialized-query-metadata-classes
  final class QueryMetadata {
    public final boolean deterministicOrder;
    public final boolean infinite;

    public QueryMetadata(boolean deterministicOrder, boolean infinite) {
      this.deterministicOrder = deterministicOrder;
      this.infinite = infinite;
    }
  }

  //#materialized-query-metadata-classes

  static
  //#materialized-query-metadata-classes
  final class AllEvents implements Query<Object, QueryMetadata> {
    private AllEvents() {}
    private static final AllEvents INSTANCE = new AllEvents();
  }

  //#materialized-query-metadata-classes

  void demonstrateMaterializedQueryValues() {
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer mat = ActorMaterializer.create(system);

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#materialized-query-metadata

    final Source<Object, QueryMetadata> events = readJournal.query(AllEvents.INSTANCE);

    events.mapMaterializedValue(meta -> {
      System.out.println("The query is: " +
        "ordered deterministically: " + meta.deterministicOrder + " " +
        "infinite: " + meta.infinite);
      return meta;
    });
    //#materialized-query-metadata
  }

  class ReactiveStreamsCompatibleDBDriver {
    Subscriber<List<Object>> batchWriter() {
      return null;
    }
  }

  void demonstrateWritingIntoDifferentStore() {
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer mat = ActorMaterializer.create(system);

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#projection-into-different-store-rs
    final ReactiveStreamsCompatibleDBDriver driver = new ReactiveStreamsCompatibleDBDriver();
    final Subscriber<List<Object>> dbBatchWriter = driver.batchWriter();

    // Using an example (Reactive Streams) Database driver
    readJournal
      .query(EventsByPersistenceId.create("user-1337"))
      .grouped(20) // batch inserts into groups of 20
      .runWith(Sink.create(dbBatchWriter), mat); // write batches to read-side database
    //#projection-into-different-store-rs
  }
  //#projection-into-different-store-simple-classes
  class ExampleStore {
    Future<Void> save(Object any) {
      // ...
      //#projection-into-different-store-simple-classes
      return null;
      //#projection-into-different-store-simple-classes
    }
  }
  //#projection-into-different-store-simple-classes

  void demonstrateWritingIntoDifferentStoreWithMapAsync() {
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer mat = ActorMaterializer.create(system);

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#projection-into-different-store-simple
    final ExampleStore store = new ExampleStore();

    readJournal
      .query(EventsByTag.create("bid"))
      .mapAsync(1, store::save)
      .runWith(Sink.ignore(), mat);
    //#projection-into-different-store-simple
  }

  //#projection-into-different-store
  class MyResumableProjection {
    private final String name;

    public MyResumableProjection(String name) {
      this.name = name;
    }

    public Future<Long> saveProgress(long offset) {
      // ...
      //#projection-into-different-store
      return null;
      //#projection-into-different-store
    }
    public Future<Long> latestOffset() {
      // ...
      //#projection-into-different-store
      return null;
      //#projection-into-different-store
    }
  }
  //#projection-into-different-store

  void demonstrateWritingIntoDifferentStoreWithResumableProjections() throws Exception {
    final ActorSystem system = ActorSystem.create();
    final ActorMaterializer mat = ActorMaterializer.create(system);

    final ReadJournal readJournal =
      PersistenceQuery.get(system)
        .getReadJournalFor("akka.persistence.query.noop-read-journal");

    //#projection-into-different-store-actor-run
    final Timeout timeout = Timeout.apply(3, TimeUnit.SECONDS);

    final MyResumableProjection bidProjection = new MyResumableProjection("bid");

    final Props writerProps = Props.create(TheOneWhoWritesToQueryJournal.class);
    final ActorRef writer = system.actorOf(writerProps, "bid-projection-writer");

    long startFromOffset = Await.result(bidProjection.latestOffset(), timeout.duration());

    readJournal
      .query(EventsByTag.create("bid", startFromOffset))
      .<Long>mapAsync(8, envelope -> {
        final Future<Object> f = ask(writer, envelope.event(), timeout);
        return f.<Long>map(new Mapper<Object, Long>() {
          @Override public Long apply(Object in) {
            return envelope.offset();
          }
        }, system.dispatcher());
      })
      .mapAsync(1, offset -> bidProjection.saveProgress(offset))
      .runWith(Sink.ignore(), mat);
  }
  //#projection-into-different-store-actor-run

  class ComplexState {

    boolean readyToSave() {
      return false;
    }
  }

  static class Record {
    static Record of(Object any) {
      return new Record();
    }
  }

  //#projection-into-different-store-actor
  final class TheOneWhoWritesToQueryJournal extends AbstractActor {
    private final ExampleStore store;

    private ComplexState state = new ComplexState();

    public TheOneWhoWritesToQueryJournal() {
      store = new ExampleStore();

      receive(ReceiveBuilder.matchAny(message -> {
        state = updateState(state, message);

        // example saving logic that requires state to become ready:
        if (state.readyToSave())
          store.save(Record.of(state));

      }).build());
    }

    ComplexState updateState(ComplexState state, Object msg) {
      // some complicated aggregation logic here ...
      return state;
    }
  }
  //#projection-into-different-store-actor

}

akka-docs/rst/java/code/docs/persistence/query/MyEventsByTagJavaPublisher.java
@ -0,0 +1,121 @@
/*
 * Copyright (C) 2015 Typesafe Inc. <http://www.typesafe.com>
 */

package docs.persistence.query;

import akka.actor.Cancellable;
import akka.actor.Props;
import akka.actor.Scheduler;
import akka.japi.Pair;
import akka.japi.pf.ReceiveBuilder;
import akka.persistence.PersistentRepr;
import akka.persistence.query.EventEnvelope;
import akka.serialization.Serialization;
import akka.serialization.SerializationExtension;
import akka.stream.actor.AbstractActorPublisher;
import akka.stream.actor.ActorPublisherMessage.Cancel;

import scala.concurrent.duration.FiniteDuration;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import static java.util.stream.Collectors.toList;

//#events-by-tag-publisher
class MyEventsByTagJavaPublisher extends AbstractActorPublisher<EventEnvelope> {
  private final Serialization serialization =
    SerializationExtension.get(context().system());

  private final Connection connection;

  private final String tag;

  private final String CONTINUE = "CONTINUE";
  private final int LIMIT = 1000;
  private long currentOffset;
  private List<EventEnvelope> buf = new LinkedList<>();

  private Cancellable continueTask;

  public MyEventsByTagJavaPublisher(Connection connection,
                                    String tag,
                                    Long offset,
                                    FiniteDuration refreshInterval) {
    this.connection = connection;
    this.tag = tag;
    this.currentOffset = offset;
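
    // poll the database periodically by sending this actor a CONTINUE tick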
    final Scheduler scheduler = context().system().scheduler();
    this.continueTask = scheduler
      .schedule(refreshInterval, refreshInterval, self(), CONTINUE,
        context().dispatcher(), self());

    receive(ReceiveBuilder
      .matchEquals(CONTINUE, (in) -> {
        query();
        deliverBuf();
      })
      .match(Cancel.class, (in) -> {
        context().stop(self());
      })
      .build());
  }

  public static Props props(Connection conn, String tag, Long offset, FiniteDuration refreshInterval) {
    return Props.create(() -> new MyEventsByTagJavaPublisher(conn, tag, offset, refreshInterval));
  }

  @Override
  public void postStop() {
    continueTask.cancel();
  }
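
  // when the buffer is empty, selects the next batch of tagged events from the
  // journal table, starting at the current offset, and turns them into EventEnvelopes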
  private void query() {
    if (buf.isEmpty()) {
      final String query = "SELECT id, persistent_repr " +
        "FROM journal WHERE tag = ? AND id >= ? " +
        "ORDER BY id LIMIT ?";

      try (PreparedStatement s = connection.prepareStatement(query)) {
        s.setString(1, tag);
        s.setLong(2, currentOffset);
        s.setLong(3, LIMIT);
        try (ResultSet rs = s.executeQuery()) {

          final List<Pair<Long, byte[]>> res = new ArrayList<>(LIMIT);
          while (rs.next())
            res.add(Pair.create(rs.getLong(1), rs.getBytes(2)));

          if (!res.isEmpty()) {
            currentOffset = res.get(res.size() - 1).first();
          }

          buf = res.stream().map(in -> {
            final Long id = in.first();
            final byte[] bytes = in.second();

            final PersistentRepr p = serialization.deserialize(bytes, PersistentRepr.class).get();

            return new EventEnvelope(id, p.persistenceId(), p.sequenceNr(), p.payload());
          }).collect(toList());
        }
      } catch (Exception e) {
        onErrorThenStop(e);
      }
    }
  }
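
  // emits buffered events while the downstream subscriber signals demand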
  private void deliverBuf() {
    while (totalDemand() > 0 && !buf.isEmpty())
      onNext(buf.remove(0));
  }
}
//#events-by-tag-publisher

@ -12,4 +12,5 @@ Actors
    routing
    fsm
    persistence
+   persistence-query
    testing

@ -61,7 +61,7 @@ Architecture
 * *AbstractPersistentActorAtLeastOnceDelivery*: To send messages with at-least-once delivery semantics to destinations, also in
   case of sender and receiver JVM crashes.

-* *Journal*: A journal stores the sequence of messages sent to a persistent actor. An application can control which messages
+* *AsyncWriteJournal*: A journal stores the sequence of messages sent to a persistent actor. An application can control which messages
   are journaled and which are received by the persistent actor without being journaled. The storage backend of a journal is pluggable.
   Persistence extension comes with a "leveldb" journal plugin, which writes to the local filesystem,
   and replicated journals are available as `Community plugins`_.

akka-docs/rst/java/persistence-query.rst
@ -0,0 +1,229 @@
.. _persistence-query-java:

#################
Persistence Query
#################

Akka persistence query complements :ref:`persistence-java` by providing a universal asynchronous stream based
query interface that various journal plugins can implement in order to expose their query capabilities.

The most typical use case of persistence query is implementing the so-called query side (also known as the "read side")
in the popular CQRS architecture pattern - in which the writing side of the application (e.g. implemented using akka
persistence) is completely separated from the "query side". Akka Persistence Query itself is *not* directly the query
side of an application, however it can help to migrate data from the write side to the query side database. In very
simple scenarios Persistence Query may be powerful enough to fulfill the query needs of your app, however we highly
recommend (in the spirit of CQRS) splitting up the write/read sides into separate datastores as the need arises.

.. warning::

  This module is marked as **“experimental”** as of its introduction in Akka 2.4.0. We will continue to
  improve this API based on our users’ feedback, which implies that while we try to keep incompatible
  changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the
  contents of the ``akka.persistence.query`` package.

Dependencies
============

Akka persistence query is a separate jar file. Make sure that you have the following dependency in your project::

  "com.typesafe.akka" %% "akka-persistence-query-experimental" % "@version@" @crossString@

Design overview
===============

Akka persistence query is purposely designed to be a very loosely specified API.
This keeps the provided APIs general enough for each journal implementation to expose its best
features; for example, a SQL journal can use complex SQL queries, and a journal that can subscribe
to a live event stream can expose that capability through the same API - a typed stream of events.

**Each read journal must explicitly document which types of queries it supports.**
Refer to your journal plugin's documentation for details on which queries and semantics it supports.

While Akka Persistence Query does not provide actual implementations of ReadJournals, it defines a number of pre-defined
query types for the most common query scenarios that most journals are likely to implement (although they are not required to).

Read Journals
=============

In order to issue queries one has to first obtain an instance of a ``ReadJournal``.
Read journals are implemented as `Community plugins`_, each targeting a specific datastore (for example Cassandra or JDBC
databases). For example, given a library that provides an ``akka.persistence.query.noop-read-journal``, obtaining the related
journal is as simple as:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#basic-usage

Journal implementers are encouraged to put this identifier in a variable known to the user, such that one can access it via
``getReadJournalFor(NoopJournal.identifier)``, however this is not enforced.
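
A minimal sketch of what such an identifier constant could look like (``NoopJournal`` is a hypothetical
plugin class invented for this example, not something shipped with Akka)::

  public final class NoopJournal implements ReadJournal {
    // the plugin id under which this journal is registered in the configuration
    public static final String identifier = "akka.persistence.query.noop-read-journal";

    @Override
    public <T, M> Source<T, M> query(Query<T, M> q, Hint... hints) {
      // this sample journal supports no queries at all
      return Source.<T>failed(new UnsupportedOperationException("no queries supported"))
        .mapMaterializedValue(ignored -> null);
    }
  }
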
Read journal implementations are available as `Community plugins`_.


Predefined queries
------------------
Akka persistence query comes with a number of ``Query`` objects built in and suggests that journal implementors implement
them according to the semantics described below. It is important to notice that while these query types are very common,
a journal is not obliged to implement all of them - for example because in a given journal such a query would be
significantly inefficient.

.. note::
  Refer to the documentation of the :class:`ReadJournal` plugin you are using for a specific list of supported query types.
  For example, Journal plugins should document their stream completion strategies.

The predefined queries are:

``AllPersistenceIds`` which is designed to allow users to subscribe to a stream of all persistent ids in the system.
By default this stream should be assumed to be a "live" stream, which means that the journal should keep emitting new
persistence ids as they come into the system:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#all-persistence-ids-live

If your usage does not require a live stream, you can disable refreshing by using *hints*, providing the built-in
``NoRefresh`` hint to the query:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#all-persistence-ids-snap

``EventsByPersistenceId`` is a query equivalent to replaying a :ref:`PersistentActor <event-sourcing>`,
however, since it is a stream it is possible to keep it alive and watch for additional incoming events persisted by the
persistent actor identified by the given ``persistenceId``. Most journals will have to resort to polling in order to achieve
this, which can be configured using the ``RefreshInterval`` query hint:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#events-by-persistent-id-refresh

``EventsByTag`` allows querying events regardless of which ``persistenceId`` they are associated with. This query is hard to
implement in some journals or may need some additional preparation of the used data store to be executed efficiently;
please refer to your read journal plugin's documentation to find out if and how it is supported. The goal of this query
is to allow querying for all events which are "tagged" with a specific tag - again, how exactly this is implemented
depends on the used journal.

.. note::
  A very important thing to keep in mind when using queries spanning multiple persistenceIds, such as ``EventsByTag``,
  is that the order in which the events appear in the stream is rarely guaranteed (or stable between materializations).

  Journals *may* choose to opt for strict ordering of the events, and should then document explicitly what kind of ordering
  guarantee they provide - for example "*ordered by timestamp ascending, independently of persistenceId*" is easy to achieve
  on relational databases, yet may be hard to implement efficiently on plain key-value datastores.

In the example below we query all events which have been tagged (we assume this was performed by the write-side using an
:ref:`EventAdapter <event-adapters-java>`, or that the journal is smart enough that it can figure out what we mean by this
tag - for example if the journal stored the events as JSON it may try to find those with the field ``tag`` set to this value):

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#events-by-tag

As you can see, we can use all the usual stream combinators available from `Akka Streams`_ on the resulting query stream,
including for example taking the first 10 and cancelling the stream. It is worth pointing out that the built-in ``EventsByTag``
query has an optionally supported offset parameter (of type ``Long``) which the journals can use to implement resumable streams.
For example a journal may be able to use a WHERE clause to begin the read starting from a specific row, or in a datastore
that is able to order events by insertion time it could treat the Long as a timestamp and select only events newer than it.

Materialized values of queries
------------------------------
Journals are able to provide additional information related to a query by exposing `materialized values`_,
a feature of `Akka Streams`_ that allows exposing additional values at stream materialization time.

More advanced query journals may use this technique to expose information about the character of the materialized
stream, for example if it's finite or infinite, strictly ordered or not ordered at all. The materialized value type
is defined as the ``M`` type parameter of a query (``Query[T,M]``), which allows journals to provide users with their
specialised query object, as demonstrated in the sample below:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#materialized-query-metadata-classes
.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#materialized-query-metadata

.. _materialized values: http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0/java/stream-quickstart.html#Materialized_values
.. _Akka Streams: http://doc.akka.io/docs/akka-stream-and-http-experimental/1.0/java.html
.. _Community plugins: http://akka.io/community/#plugins-to-akka-persistence-query

Performance and denormalization
===============================
When building systems using :ref:`event-sourcing` and CQRS (`Command & Query Responsibility Segregation`_) techniques
it is tremendously important to realise that the write-side has completely different needs from the read-side,
and separating those concerns into datastores that are optimised for either side makes it possible to offer the best
experience for the write and read sides independently.

For example, in a bidding system it is important to "take the write" and respond to the bidder that we have accepted
the bid as soon as possible, which means that write-throughput is of highest importance for the write-side – often this
means that data stores which are able to scale to accommodate these requirements have a less expressive query side.

On the other hand the same application may have some complex statistics view, or we may have analysts working with the data
to figure out best bidding strategies and trends – this often requires some kind of expressive query capabilities like
for example SQL or writing Spark jobs to analyse the data. Therefore the data stored in the write-side needs to be
projected into the other read-optimised datastore.

.. note::
  When referring to **Materialized Views** in Akka Persistence think of it as "some persistent storage of the result of a Query".
  In other words, it means that the view is created once, in order to be queried multiple times afterwards, since in this format
  it may be more efficient or convenient to query it (instead of the source events directly).

Materialize view to Reactive Streams compatible datastore
---------------------------------------------------------

If the read datastore exposes a `Reactive Streams`_ interface then implementing a simple projection
is as simple as using the read journal and feeding it into the database's driver interface, for example like so:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#projection-into-different-store-rs

.. _Reactive Streams: http://reactive-streams.org

Materialize view using mapAsync
-------------------------------

If the target database does not provide a reactive streams ``Subscriber`` that can perform writes,
you may have to implement the write logic using plain functions or Actors instead.

In case your write logic is stateless and you just need to convert the events from one data type to another
before writing into the alternative datastore, then the projection is as simple as:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#projection-into-different-store-simple-classes
.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#projection-into-different-store-simple

Resumable projections
---------------------

Sometimes you may need to implement "resumable" projections, which do not start from the beginning of time each time
they are run. In this case you will need to store the sequence number (or ``offset``) of the processed event and use it
the next time this projection is started. This pattern is not built-in, however it is rather simple to implement yourself.
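
For illustration, a minimal in-memory offset store could look like the sketch below (the class and method
names are made up for this example; a real implementation would persist the offset to durable storage so
that it survives restarts)::

  import java.util.concurrent.atomic.AtomicLong;

  public class InMemoryOffsetStore {
    private final AtomicLong lastOffset = new AtomicLong(0L);

    // remember the offset of the latest event that was fully processed
    public void saveProgress(long offset) {
      lastOffset.set(offset);
    }

    // where the next run of the projection should resume from
    public long latestOffset() {
      return lastOffset.get();
    }
  }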

The example below additionally highlights how you would use Actors to implement the write side, in case
you need to do some complex logic that would be best handled inside an Actor before persisting the event
into the other datastore:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#projection-into-different-store-actor-run

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#projection-into-different-store-actor

.. _Command & Query Responsibility Segregation: https://msdn.microsoft.com/en-us/library/jj554200.aspx

.. _read-journal-plugin-api-java:

Query plugins
=============

Query plugins are various (mostly community driven) :class:`ReadJournal` implementations for all kinds
of available datastores. The complete list of available plugins is maintained on the Akka Persistence Query `Community Plugins`_ page.

This section aims to provide tips and guide plugin developers through implementing a custom query plugin.
Most users will not need to implement journals themselves, unless they are targeting a not yet supported datastore.

.. note::
  Since different data stores provide different query capabilities journal plugins **must extensively document**
  their exposed semantics as well as handled query scenarios.

ReadJournal plugin API
----------------------

Journals *MUST* return a *failed* ``Source`` if they are unable to execute the passed in query.
For example if the user accidentally passed in an ``SqlQuery()`` to a key-value journal.

Below is a simple journal implementation:

.. includecode:: code/docs/persistence/PersistenceQueryDocTest.java#my-read-journal

And the ``EventsByTag`` query could be backed by such an Actor, for example:

.. includecode:: code/docs/persistence/query/MyEventsByTagJavaPublisher.java#events-by-tag-publisher

Plugin TCK
----------

TODO, not available yet.

@ -65,7 +65,7 @@ Architecture
 * *UntypedPersistentActorAtLeastOnceDelivery*: To send messages with at-least-once delivery semantics to destinations, also in
   case of sender and receiver JVM crashes.

-* *Journal*: A journal stores the sequence of messages sent to a persistent actor. An application can control which messages
+* *AsyncWriteJournal*: A journal stores the sequence of messages sent to a persistent actor. An application can control which messages
   are journaled and which are received by the persistent actor without being journaled. The storage backend of a journal is pluggable.
   Persistence extension comes with a "leveldb" journal plugin, which writes to the local filesystem,
   and replicated journals are available as `Community plugins`_.

@ -610,7 +610,7 @@ completely.

 Event Adapters help in situations where:

-- **Version Migration** – existing events stored in *Version 1* should be "upcasted" to a new *Version 2* representation,
+- **Version Migrations** – existing events stored in *Version 1* should be "upcasted" to a new *Version 2* representation,
   and the process of doing so involves actual code, not just changes on the serialization layer. For these scenarios
   the ``toJournal`` function is usually an identity function, however the ``fromJournal`` is implemented as
   ``v1.Event=>v2.Event``, performing the necessary mapping inside the fromJournal method.