=per #18288 Add docs of leveldb queries

This commit is contained in:
Patrik Nordwall 2015-08-21 11:35:51 +02:00
parent dfba334fda
commit fed622eb9f
17 changed files with 683 additions and 30 deletions

View file

@@ -5,26 +5,25 @@
# This is the reference config file that contains all the default settings.
# Make your edits in your application.conf in order to override these settings.
akka.persistence.query {
journal {
leveldb {
class = "akka.persistence.query.journal.leveldb.LeveldbReadJournal"
# Absolute path to the write journal plugin configuration entry that this query journal
# will connect to. That must be a LeveldbJournal or SharedLeveldbJournal.
# If undefined (or "") it will connect to the default journal as specified by the
# akka.persistence.journal.plugin property.
write-plugin = ""
# Look for more data with this interval. The query journal is also notified by
# the write journal when something is changed and thereby updated quickly, but
# when there are a lot of changes it falls back to periodic queries to avoid
# overloading the system with many small queries.
refresh-interval = 3s
# How many events to fetch in one query and keep buffered until they
# are delivered downstream.
max-buffer-size = 100
}
}
#//#query-leveldb
# Configuration for the LeveldbReadJournal
akka.persistence.query.journal.leveldb {
# Implementation class of the LevelDB ReadJournal
class = "akka.persistence.query.journal.leveldb.LeveldbReadJournal"
# Absolute path to the write journal plugin configuration entry that this
# query journal will connect to. That must be a LeveldbJournal or SharedLeveldbJournal.
# If undefined (or "") it will connect to the default journal as specified by the
# akka.persistence.journal.plugin property.
write-plugin = ""
# The LevelDB write journal is notifying the query side as soon as things
# are persisted, but for efficiency reasons the query side retrieves the events
# in batches that sometimes can be delayed up to the configured `refresh-interval`.
refresh-interval = 3s
# How many events to fetch in one query (replay) and keep buffered until they
# are delivered downstream.
max-buffer-size = 100
}
#//#query-leveldb

View file

@@ -22,9 +22,135 @@ import akka.util.ByteString
import java.net.URLEncoder
object LeveldbReadJournal {
  /**
   * Default identifier of the LevelDB read journal, intended to be passed to
   * [[akka.persistence.query.PersistenceQuery#readJournalFor]] when obtaining
   * a [[LeveldbReadJournal]] instance.
   *
   * Its value, `"akka.persistence.query.journal.leveldb"`, is also the absolute
   * path of this read journal's entry in the configuration.
   */
  final val Identifier = "akka.persistence.query.journal.leveldb"
}
/**
* [[akka.persistence.query.scaladsl.ReadJournal]] implementation for LevelDB.
*
* It is retrieved with Scala API:
* {{{
* val queries = PersistenceQuery(system).readJournalFor(LeveldbReadJournal.Identifier)
* }}}
*
* or with Java API:
* {{{
* ReadJournal queries =
* PersistenceQuery.get(system).getReadJournalFor(LeveldbReadJournal.Identifier());
* }}}
*
* Configuration settings can be defined in the configuration section with the
* absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"`
* for the default [[LeveldbReadJournal#Identifier]]. See `reference.conf`.
*
* The following queries are supported.
*
* == EventsByPersistenceId ==
*
* [[akka.persistence.query.EventsByPersistenceId]] is used for retrieving events for a specific
* `PersistentActor` identified by `persistenceId`.
*
* You can retrieve a subset of all events by specifying `fromSequenceNr` and `toSequenceNr`
* or use `0L` and `Long.MaxValue` respectively to retrieve all events. Note that
* the corresponding sequence number of each event is provided in the
* [[akka.persistence.query.EventEnvelope]], which makes it possible to resume the
* stream at a later point from a given sequence number.
*
* The returned event stream is ordered by sequence number, i.e. the same order as the
* `PersistentActor` persisted the events. The same prefix of stream elements (in same order)
* are returned for multiple executions of the query, except for when events have been deleted.
*
* The query supports two different completion modes:
* <ul>
* <li>The stream is not completed when it reaches the end of the currently stored events,
* but it continues to push new events when new events are persisted. This is the
* default mode that is used when no hints are given. It can also be specified with
* hint [[akka.persistence.query.RefreshInterval]].</li>
* <li>The stream is completed when it reaches the end of the currently stored events.
* This mode is specified with hint [[akka.persistence.query.NoRefresh]].</li>
* </ul>
*
* The LevelDB write journal is notifying the query side as soon as events are persisted, but for
* efficiency reasons the query side retrieves the events in batches that sometimes can
* be delayed up to the configured `refresh-interval` or given [[akka.persistence.query.RefreshInterval]]
* hint.
*
* The stream is completed with failure if there is a failure in executing the query in the
* backend journal.
*
* == AllPersistenceIds ==
*
* [[akka.persistence.query.AllPersistenceIds]] is used for retrieving all `persistenceIds` of all
* persistent actors.
*
* The returned event stream is unordered and you can expect different order for multiple
* executions of the query.
*
* The query supports two different completion modes:
* <ul>
* <li>The stream is not completed when it reaches the end of the currently used `persistenceIds`,
* but it continues to push new `persistenceIds` when new persistent actors are created.
* This is the default mode that is used when no hints are given. It can also be specified with
* hint [[akka.persistence.query.RefreshInterval]].</li>
* <li>The stream is completed when it reaches the end of the currently used `persistenceIds`.
* This mode is specified with hint [[akka.persistence.query.NoRefresh]].</li>
* </ul>
*
* The LevelDB write journal is notifying the query side as soon as new `persistenceIds` are
* created and there is no periodic polling or batching involved in this query.
*
* The stream is completed with failure if there is a failure in executing the query in the
* backend journal.
*
* == EventsByTag ==
*
* [[akka.persistence.query.EventsByTag]] is used for retrieving events that were marked with
* a given tag, e.g. all events of an Aggregate Root type.
*
* To tag events you create an [[akka.persistence.journal.EventAdapter]] that wraps the events
* in a [[akka.persistence.journal.leveldb.Tagged]] with the given `tags`.
*
* You can retrieve a subset of all events by specifying `offset`, or use `0L` to retrieve all
* events with a given tag. The `offset` corresponds to an ordered sequence number for
* the specific tag. Note that the corresponding offset of each event is provided in the
* [[akka.persistence.query.EventEnvelope]], which makes it possible to resume the
* stream at a later point from a given offset.
*
* In addition to the `offset` the `EventEnvelope` also provides `persistenceId` and `sequenceNr`
* for each event. The `sequenceNr` is the sequence number for the persistent actor with the
* `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique
* identifier for the event.
*
* The returned event stream is ordered by the offset (tag sequence number), which corresponds
* to the same order as the write journal stored the events. The same stream elements (in same order)
* are returned for multiple executions of the query. Deleted events are not deleted from the
* tagged event stream.
*
* The query supports two different completion modes:
* <ul>
* <li>The stream is not completed when it reaches the end of the currently stored events,
* but it continues to push new events when new events are persisted. This is the
* default mode that is used when no hints are given. It can also be specified with
* hint [[akka.persistence.query.RefreshInterval]].</li>
* <li>The stream is completed when it reaches the end of the currently stored events.
* This mode is specified with hint [[akka.persistence.query.NoRefresh]].</li>
* </ul>
*
* The LevelDB write journal is notifying the query side as soon as tagged events are persisted, but for
* efficiency reasons the query side retrieves the events in batches that sometimes can
* be delayed up to the configured `refresh-interval` or given [[akka.persistence.query.RefreshInterval]]
* hint.
*
* The stream is completed with failure if there is a failure in executing the query in the
* backend journal.
*/
class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends scaladsl.ReadJournal {
private val serialization = SerializationExtension(system)

View file

@@ -19,6 +19,7 @@ import akka.persistence.journal.leveldb.Tagged
import akka.persistence.journal.EventSeq
import akka.persistence.journal.EventAdapter
import akka.persistence.query.EventEnvelope
import akka.persistence.journal.WriteEventAdapter
object EventsByTagSpec {
val config = """
@@ -38,7 +39,7 @@ object EventsByTagSpec {
}
class ColorTagger extends EventAdapter {
class ColorTagger extends WriteEventAdapter {
val colors = Set("green", "black", "blue")
override def toJournal(event: Any): Any = event match {
case s: String
@@ -48,8 +49,6 @@ class ColorTagger extends EventAdapter {
case _ ⇒ event
}
override def fromJournal(event: Any, manifest: String): EventSeq = EventSeq.single(event)
override def manifest(event: Any): String = ""
}