/**
 * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
 * Copyright (C) 2012-2013 Eligotech BV.
 */

package akka.persistence.journal

import akka.actor._
import akka.pattern.pipe
import akka.persistence._

import scala.collection.immutable
import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }
import scala.util.control.NonFatal

/**
 * Abstract journal, optimized for asynchronous, non-blocking writes.
 */
trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery {
  import AsyncWriteJournal._
  import JournalProtocol._
  import context.dispatcher

  private val extension = Persistence(context.system)
  private val publish = extension.settings.internal.publishPluginCommands

  // delivers confirmation messages to persistent actors in the order the writes were submitted
  private val resequencer = context.actorOf(Props[Resequencer]())
  private var resequencerCounter = 1L

  final def receive = receiveWriteJournal.orElse[Any, Unit](receivePluginInternal)

  final val receiveWriteJournal: Actor.Receive = {
    case WriteMessages(messages, persistentActor, actorInstanceId) ⇒
      val cctr = resequencerCounter
      // reserve resequencer slots: one for the WriteMessagesSuccessful/Failed envelope at
      // `cctr` plus one per individual message in the batch
      resequencerCounter += messages.foldLeft(0)((acc, m) ⇒ acc + m.size) + 1

      val atomicWriteCount = messages.count(_.isInstanceOf[AtomicWrite])
      val prepared = Try(preparePersistentBatch(messages))
      val writeResult = (prepared match {
        case Success(prep) ⇒
          // in case the asyncWriteMessages throws
          try asyncWriteMessages(prep) catch { case NonFatal(e) ⇒ Future.failed(e) }
        case f @ Failure(_) ⇒
          // exception from preparePersistentBatch => rejected
          Future.successful(messages.collect { case a: AtomicWrite ⇒ f })
      }).map { results ⇒
        if (results.nonEmpty && results.size != atomicWriteCount)
          throw new IllegalStateException("asyncWriteMessages returned invalid number of results. " +
            s"Expected [${prepared.get.size}], but got [${results.size}]")
        results
      }

      writeResult.onComplete {
        case Success(results) ⇒
          resequencer ! Desequenced(WriteMessagesSuccessful, cctr, persistentActor, self)

          val resultsIter =
            if (results.isEmpty) Iterator.fill(atomicWriteCount)(AsyncWriteJournal.successUnit)
            else results.iterator
          var n = cctr + 1
          messages.foreach {
            case a: AtomicWrite ⇒
              resultsIter.next() match {
                case Success(_) ⇒
                  a.payload.foreach { p ⇒
                    resequencer ! Desequenced(WriteMessageSuccess(p, actorInstanceId), n, persistentActor, p.sender)
                    n += 1
                  }
                case Failure(e) ⇒
                  a.payload.foreach { p ⇒
                    resequencer ! Desequenced(WriteMessageRejected(p, e, actorInstanceId), n, persistentActor, p.sender)
                    n += 1
                  }
              }

            case r: NonPersistentRepr ⇒
              resequencer ! Desequenced(LoopMessageSuccess(r.payload, actorInstanceId), n, persistentActor, r.sender)
              n += 1
          }

        case Failure(e) ⇒
          resequencer ! Desequenced(WriteMessagesFailed(e), cctr, persistentActor, self)
          var n = cctr + 1
          messages.foreach {
            case a: AtomicWrite ⇒
              a.payload.foreach { p ⇒
                resequencer ! Desequenced(WriteMessageFailure(p, e, actorInstanceId), n, persistentActor, p.sender)
                n += 1
              }
            case r: NonPersistentRepr ⇒
              resequencer ! Desequenced(LoopMessageSuccess(r.payload, actorInstanceId), n, persistentActor, r.sender)
              n += 1
          }
      }

    case r @ ReplayMessages(fromSequenceNr, toSequenceNr, max, persistenceId, persistentActor) ⇒
      asyncReadHighestSequenceNr(persistenceId, fromSequenceNr).flatMap { highSeqNr ⇒
        val toSeqNr = math.min(toSequenceNr, highSeqNr)
        if (highSeqNr == 0L || fromSequenceNr > toSeqNr)
          Future.successful(highSeqNr)
        else {
          // Send replayed messages and replay result to persistentActor directly. No need
          // to resequence replayed messages relative to written and looped messages.
          asyncReplayMessages(persistenceId, fromSequenceNr, toSeqNr, max) { p ⇒
            if (!p.deleted) // old records from 2.3 may still have the deleted flag
              adaptFromJournal(p).foreach { adaptedPersistentRepr ⇒
                persistentActor.tell(ReplayedMessage(adaptedPersistentRepr), Actor.noSender)
              }
          }.map(_ ⇒ highSeqNr)
        }
      }.map {
        highSeqNr ⇒ RecoverySuccess(highSeqNr)
      }.recover {
        case e ⇒ ReplayMessagesFailure(e)
      }.pipeTo(persistentActor).onSuccess {
        case _ if publish ⇒ context.system.eventStream.publish(r)
      }

    case d @ DeleteMessagesTo(persistenceId, toSequenceNr, persistentActor) ⇒
      asyncDeleteMessagesTo(persistenceId, toSequenceNr) map {
        case _ ⇒ DeleteMessagesSuccess(toSequenceNr)
      } recover {
        case e ⇒ DeleteMessagesFailure(e, toSequenceNr)
      } pipeTo persistentActor onComplete {
        // onComplete expects a total callback, so the publish check is done inside the
        // body rather than in a pattern guard (which would not match when publishing is disabled)
        case _ ⇒ if (publish) context.system.eventStream.publish(d)
      }
  }

  //#journal-plugin-api
  /**
   * Plugin API: asynchronously writes a batch (`Seq`) of persistent messages to the
   * journal.
   *
   * The batch is only for performance reasons, i.e. the messages don't all have to be
   * written atomically. Higher throughput can typically be achieved by using batch inserts
   * of many records compared to inserting records one-by-one, but this aspect depends on
   * the underlying data store and a journal implementation can implement it as efficiently
   * as possible with the assumption that the messages of the batch are unrelated.
   *
   * Each `AtomicWrite` message contains the single `PersistentRepr` that corresponds to
   * the event that was passed to the `persist` method of the `PersistentActor`, or it
   * contains several `PersistentRepr` that correspond to the events that were passed
   * to the `persistAll` method of the `PersistentActor`. All `PersistentRepr` of the
   * `AtomicWrite` must be written to the data store atomically, i.e. all or none must
   * be stored. If the journal (data store) cannot support atomic writes of multiple
   * events it should reject such writes with a `Try` `Failure` with an
   * `UnsupportedOperationException` describing the issue. This limitation should
   * also be documented by the journal plugin.
   *
   * If there are failures when storing any of the messages in the batch the returned
   * `Future` must be completed with failure. The `Future` must only be completed with
   * success when all messages in the batch have been confirmed to be stored successfully,
   * i.e. they will be readable, and visible, in a subsequent replay. If there is
   * uncertainty about whether the messages were stored or not the `Future` must be
   * completed with failure.
   *
   * Data store connection problems must be signaled by completing the `Future` with
   * failure.
   *
   * The journal can also signal that it rejects individual messages (`AtomicWrite`) by
   * the returned `immutable.Seq[Try[Unit]]`. It is possible but not mandatory to reduce
   * the number of allocations by returning `Future.successful(Nil)` for the happy path,
   * i.e. when no messages are rejected. Otherwise the returned `Seq` must have as many
   * elements as the input `messages` `Seq`. Each `Try` element signals whether the
   * corresponding `AtomicWrite` is rejected or not, with an exception describing the
   * problem. Rejecting a message means it was not stored, i.e. it must not be included
   * in a later replay. Rejecting a message is typically done before attempting to store
   * it, e.g. because of a serialization error.
   *
   * Data store connection problems must not be signaled as rejections.
   */
  def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]]
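  // For illustration only (hypothetical values): for an input of three `AtomicWrite`s where
  // the second one cannot be stored, a conforming implementation could complete the returned
  // `Future` with
  //   Seq(Success(()), Failure(new UnsupportedOperationException("...")), Success(()))
  // and, when nothing is rejected, it may simply return `Future.successful(Nil)`.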

  /**
   * Plugin API: asynchronously deletes all persistent messages up to `toSequenceNr`
   * (inclusive).
   */
  def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit]

  /**
   * Plugin API
   *
   * Allows plugin implementers to use `f pipeTo self` and
   * handle additional messages for implementing advanced features
   */
  def receivePluginInternal: Actor.Receive = Actor.emptyBehavior
  //#journal-plugin-api
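
  // For illustration only (hypothetical message type and helper): a plugin that runs
  // asynchronous housekeeping could call `cleanUp() pipeTo self` and then handle the
  // completed result in its `receivePluginInternal` override, e.g.
  //
  //   override def receivePluginInternal: Actor.Receive = {
  //     case CleanUpDone(removedRecords) ⇒ // update plugin-internal bookkeeping
  //   }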

}

/**
 * INTERNAL API.
 */
private[persistence] object AsyncWriteJournal {
  val successUnit: Success[Unit] = Success(())

  final case class Desequenced(msg: Any, snr: Long, target: ActorRef, sender: ActorRef)
    extends NoSerializationVerificationNeeded

  /**
   * Delivers `Desequenced` messages to their targets in ascending `snr` order,
   * buffering any message that arrives before its predecessors have been delivered.
   */
  class Resequencer extends Actor {
    import scala.collection.mutable.Map

    private val delayed = Map.empty[Long, Desequenced] // out-of-order messages, keyed by sequence number
    private var delivered = 0L // highest sequence number delivered so far

    def receive = {
      case d: Desequenced ⇒ resequence(d)
    }

    @scala.annotation.tailrec
    private def resequence(d: Desequenced) {
      if (d.snr == delivered + 1) {
        delivered = d.snr
        d.target.tell(d.msg, d.sender)
      } else {
        delayed += (d.snr -> d)
      }
      // deliver a buffered successor if it has now become next in line
      val ro = delayed.remove(delivered + 1)
      if (ro.isDefined) resequence(ro.get)
    }
  }
}
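
// A minimal, purely illustrative in-memory sketch of a journal plugin satisfying the
// `AsyncWriteJournal` contract documented above. It is NOT part of Akka and not production
// ready: the class name and the in-memory `Map` storage are assumptions made for
// illustration, nothing is durable, and no write is ever rejected. A real plugin would talk
// to an external data store and be enabled via the `akka.persistence.journal.plugin` setting.
private[journal] class ExampleInMemoryAsyncWriteJournal extends AsyncWriteJournal {
  // persistenceId -> messages stored so far, in sequence-number order
  private var store = Map.empty[String, Vector[PersistentRepr]]

  private def messagesOf(persistenceId: String): Vector[PersistentRepr] =
    store.getOrElse(persistenceId, Vector.empty)

  override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = {
    // Each AtomicWrite is applied as a whole and nothing here can fail partially, so every
    // element of the result is a Success; returning `Future.successful(Nil)` would also be valid.
    val results = messages.map { a ⇒
      a.payload.foreach { p ⇒
        store = store.updated(p.persistenceId, messagesOf(p.persistenceId) :+ p)
      }
      AsyncWriteJournal.successUnit
    }
    Future.successful(results)
  }

  override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = {
    store = store.updated(persistenceId, messagesOf(persistenceId).filter(_.sequenceNr > toSequenceNr))
    Future.successful(())
  }

  override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(
    recoveryCallback: PersistentRepr ⇒ Unit): Future[Unit] = {
    messagesOf(persistenceId).iterator
      .filter(p ⇒ p.sequenceNr >= fromSequenceNr && p.sequenceNr <= toSequenceNr)
      .take(math.min(max, Int.MaxValue).toInt)
      .foreach(recoveryCallback)
    Future.successful(())
  }

  override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] =
    Future.successful(messagesOf(persistenceId).lastOption.map(_.sequenceNr).getOrElse(0L))
}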