Merge pull request #28155 from akka/wip-20984-reliable-delivery-patriknw
Reliable delivery in Typed, #20984
commit
7d790ef328
61 changed files with 18784 additions and 19 deletions
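The diffs below are the new test specs for the reliable delivery controllers. For orientation, here is a minimal consumer-side sketch of the API these specs exercise. It is illustrative only and not part of this commit; the Job type and the SketchConsumer/WrappedDelivery names are made up.

// Illustrative consumer-side sketch (not part of this commit).
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.delivery.ConsumerController

object SketchConsumer {
  final case class Job(payload: String)

  sealed trait Command
  private final case class WrappedDelivery(delivery: ConsumerController.Delivery[Job]) extends Command

  def apply(consumerController: ActorRef[ConsumerController.Command[Job]]): Behavior[Command] =
    Behaviors.setup { context =>
      val deliveryAdapter: ActorRef[ConsumerController.Delivery[Job]] =
        context.messageAdapter[ConsumerController.Delivery[Job]](WrappedDelivery(_))
      // tell the ConsumerController where to deliver messages, as the specs do with a probe
      consumerController ! ConsumerController.Start(deliveryAdapter)

      Behaviors.receiveMessage { case WrappedDelivery(delivery) =>
        // process delivery.message here, then confirm to get the next message
        delivery.confirmTo ! ConsumerController.Confirmed
        Behaviors.same
      }
    }
}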
@@ -0,0 +1,575 @@
/*
 * Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.actor.typed.delivery

import akka.actor.testkit.typed.scaladsl.LogCapturing
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import akka.actor.typed.delivery.ConsumerController.DeliverThenStop
import akka.actor.typed.delivery.internal.ConsumerControllerImpl
import akka.actor.typed.delivery.internal.ProducerControllerImpl
import org.scalatest.wordspec.AnyWordSpecLike

class ConsumerControllerSpec
    extends ScalaTestWithActorTestKit("""
  akka.reliable-delivery.consumer-controller.flow-control-window = 20
  """)
    with AnyWordSpecLike
    with LogCapturing {
  import TestConsumer.sequencedMessage

  private var idCount = 0
  private def nextId(): Int = {
    idCount += 1
    idCount
  }

  private def producerId: String = s"p-$idCount"

  private val settings = ConsumerController.Settings(system)
  import settings.flowControlWindow

  "ConsumerController" must {
    "resend RegisterConsumer" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      consumerController ! ConsumerController.RegisterToProducerController(producerControllerProbe.ref)
      producerControllerProbe.expectMessage(ProducerController.RegisterConsumer(consumerController))
      // expected resend
      producerControllerProbe.expectMessage(ProducerController.RegisterConsumer(consumerController))

      testKit.stop(consumerController)
    }

    "resend RegisterConsumer when changed to different ProducerController" in {
      nextId()
      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
      val producerControllerProbe1 = createTestProbe[ProducerControllerImpl.InternalCommand]()

      consumerController ! ConsumerController.Start(consumerProbe.ref)
      consumerController ! ConsumerController.RegisterToProducerController(producerControllerProbe1.ref)
      producerControllerProbe1.expectMessage(ProducerController.RegisterConsumer(consumerController))
      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe1.ref)

      // change producer
      val producerControllerProbe2 = createTestProbe[ProducerControllerImpl.InternalCommand]()
      consumerController ! ConsumerController.RegisterToProducerController(producerControllerProbe2.ref)
      producerControllerProbe2.expectMessage(ProducerController.RegisterConsumer(consumerController))
      // expected resend
      producerControllerProbe2.expectMessage(ProducerController.RegisterConsumer(consumerController))

      testKit.stop(consumerController)
    }

    "resend initial Request" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, true))

      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      testKit.stop(consumerController)
    }

    "send Request after half window size" in {
      nextId()
      val windowSize = 20
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      (1 until windowSize / 2).foreach { n =>
        consumerController ! sequencedMessage(producerId, n, producerControllerProbe.ref)
      }

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, windowSize, true, false))
      (1 until windowSize / 2).foreach { n =>
        consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
        consumerController ! ConsumerController.Confirmed
        if (n == 1)
          producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, windowSize, true, false))
      }

      producerControllerProbe.expectNoMessage()

      consumerController ! sequencedMessage(producerId, windowSize / 2, producerControllerProbe.ref)

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      producerControllerProbe.expectNoMessage()
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(
        ProducerControllerImpl.Request(windowSize / 2, windowSize + windowSize / 2, true, false))

      testKit.stop(consumerController)
    }

    "detect lost message" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      consumerController ! sequencedMessage(producerId, 5, producerControllerProbe.ref)
      producerControllerProbe.expectMessage(ProducerControllerImpl.Resend(3))

      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 4, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 5, producerControllerProbe.ref)

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(4)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(5)
      consumerController ! ConsumerController.Confirmed

      testKit.stop(consumerController)
    }

    "resend Request" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(2, 20, true, true))

      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(3, 20, true, true))

      testKit.stop(consumerController)
    }

    "stash while waiting for consumer confirmation" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 4, producerControllerProbe.ref)
      consumerProbe.expectNoMessage()

      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(4)
      consumerController ! ConsumerController.Confirmed

      consumerController ! sequencedMessage(producerId, 5, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 6, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 7, producerControllerProbe.ref)

      // ProducerController may resend unconfirmed
      consumerController ! sequencedMessage(producerId, 5, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 6, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 7, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 8, producerControllerProbe.ref)

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(5)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(6)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(7)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(8)
      consumerController ! ConsumerController.Confirmed

      consumerProbe.expectNoMessage()

      testKit.stop(consumerController)
    }

    "optionally ack messages" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref, ack = true)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref, ack = true)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Ack(2))

      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref, ack = true)
      consumerController ! sequencedMessage(producerId, 4, producerControllerProbe.ref, ack = false)
      consumerController ! sequencedMessage(producerId, 5, producerControllerProbe.ref, ack = true)

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Ack(3))
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(4)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(5)
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Ack(5))

      testKit.stop(consumerController)
    }

    "allow restart of consumer" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe1 = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe1.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe1.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref)
      consumerProbe1.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref)
      consumerProbe1.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)

      // restart consumer, before Confirmed(3)
      val consumerProbe2 = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe2.ref)

      consumerProbe2.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)
      consumerController ! ConsumerController.Confirmed

      consumerController ! sequencedMessage(producerId, 4, producerControllerProbe.ref)
      consumerProbe2.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(4)
      consumerController ! ConsumerController.Confirmed

      testKit.stop(consumerController)
    }

    "stop ConsumerController when consumer is stopped" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]

      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe1 = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe1.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe1.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      consumerProbe1.stop()
      createTestProbe().expectTerminated(consumerController)
    }

    "stop ConsumerController when consumer is stopped before first message" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]

      val consumerProbe1 = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe1.ref)

      consumerProbe1.stop()
      createTestProbe().expectTerminated(consumerController)
    }

    "deduplicate resend of first message" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      // that Request will typically cancel the resending of first, but in unlucky timing it may happen
      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe.receiveMessage().confirmTo ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))
      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      // deduplicated, not delivered again
      consumerProbe.expectNoMessage()

      // but if the ProducerController is changed it will not be deduplicated
      val producerControllerProbe2 = createTestProbe[ProducerControllerImpl.InternalCommand]()
      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe2.ref)
      producerControllerProbe2.expectMessage(ProducerControllerImpl.Request(0, 20, true, false))
      consumerProbe.receiveMessage().confirmTo ! ConsumerController.Confirmed
      producerControllerProbe2.expectMessage(ProducerControllerImpl.Request(1, 20, true, false))

      testKit.stop(consumerController)
    }

    "request window after first" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, flowControlWindow, true, false))
      consumerProbe.receiveMessage().confirmTo ! ConsumerController.Confirmed

      // and if the ProducerController is changed
      val producerControllerProbe2 = createTestProbe[ProducerControllerImpl.InternalCommand]()
      consumerController ! sequencedMessage(producerId, 23, producerControllerProbe2.ref).asFirst
      producerControllerProbe2.expectMessage(ProducerControllerImpl.Request(0, 23 + flowControlWindow - 1, true, false))
      consumerProbe.receiveMessage().confirmTo ! ConsumerController.Confirmed

      val producerControllerProbe3 = createTestProbe[ProducerControllerImpl.InternalCommand]()
      consumerController ! sequencedMessage(producerId, 7, producerControllerProbe3.ref).asFirst
      producerControllerProbe3.expectMessage(ProducerControllerImpl.Request(0, 7 + flowControlWindow - 1, true, false))
      consumerProbe.receiveMessage().confirmTo ! ConsumerController.Confirmed

      testKit.stop(consumerController)
    }

    "handle first message when waiting for lost (resending)" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()

      // while waiting for Start the SequencedMessage will be stashed
      consumerController ! sequencedMessage(producerId, 44, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 41, producerControllerProbe.ref).asFirst
      consumerController ! sequencedMessage(producerId, 45, producerControllerProbe.ref)

      consumerController ! ConsumerController.Start(consumerProbe.ref)
      // unstashed 44, 41, 45
      // 44 is not first so will trigger a full Resend
      producerControllerProbe.expectMessage(ProducerControllerImpl.Resend(0))
      // and 41 is first, which will trigger the initial Request
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 60, true, false))

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(41)
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(41, 60, true, false))

      // 45 not expected
      producerControllerProbe.expectMessage(ProducerControllerImpl.Resend(42))

      // from previous Resend request
      consumerController ! sequencedMessage(producerId, 42, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 43, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 44, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 45, producerControllerProbe.ref)

      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(42)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(43)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(44)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(45)
      consumerController ! ConsumerController.Confirmed

      consumerController ! sequencedMessage(producerId, 46, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(46)
      consumerController ! ConsumerController.Confirmed

      testKit.stop(consumerController)
    }

    "send Ack when stopped" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]

      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe1 = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe1.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe1.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      producerControllerProbe.expectMessageType[ProducerControllerImpl.Request]
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessageType[ProducerControllerImpl.Request]

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref)
      consumerProbe1.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      testKit.stop(consumerController)
      producerControllerProbe.expectMessage(ProducerControllerImpl.Ack(2L))
    }

    "support graceful stopping" in {
      nextId()
      val consumerController =
        spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
          .unsafeUpcast[ConsumerControllerImpl.InternalCommand]

      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      producerControllerProbe.expectMessageType[ProducerControllerImpl.Request]
      consumerController ! ConsumerController.Confirmed
      producerControllerProbe.expectMessageType[ProducerControllerImpl.Request]

      consumerController ! sequencedMessage(producerId, 2, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].message should ===(
        TestConsumer.Job("msg-2"))
      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref)
      consumerController ! sequencedMessage(producerId, 4, producerControllerProbe.ref)

      consumerController ! DeliverThenStop()

      consumerController ! ConsumerController.Confirmed // 2
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].message should ===(
        TestConsumer.Job("msg-3"))
      consumerController ! sequencedMessage(producerId, 5, producerControllerProbe.ref)
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].message should ===(
        TestConsumer.Job("msg-4"))
      consumerController ! ConsumerController.Confirmed
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].message should ===(
        TestConsumer.Job("msg-5"))
      consumerController ! ConsumerController.Confirmed

      consumerProbe.expectTerminated(consumerController)

      testKit.stop(consumerController)
      // one Ack from postStop, and another from Behaviors.stopped callback after final Confirmed
      producerControllerProbe.expectMessage(ProducerControllerImpl.Ack(4L))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Ack(5L))
    }
  }

  "ConsumerController without resending" must {
    "accept lost message" in {
      nextId()
      val consumerController =
        spawn(
          ConsumerController[TestConsumer.Job](ConsumerController.Settings(system).withOnlyFlowControl(true)),
          s"consumerController-${idCount}").unsafeUpcast[ConsumerControllerImpl.InternalCommand]
      val producerControllerProbe = createTestProbe[ProducerControllerImpl.InternalCommand]()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      consumerController ! sequencedMessage(producerId, 1, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]]
      consumerController ! ConsumerController.Confirmed

      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(0, 20, supportResend = false, false))
      producerControllerProbe.expectMessage(ProducerControllerImpl.Request(1, 20, supportResend = false, false))

      // skipping 2
      consumerController ! sequencedMessage(producerId, 3, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(3)
      consumerController ! ConsumerController.Confirmed
      consumerController ! sequencedMessage(producerId, 4, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(4)
      consumerController ! ConsumerController.Confirmed

      // skip many
      consumerController ! sequencedMessage(producerId, 35, producerControllerProbe.ref)
      consumerProbe.expectMessageType[ConsumerController.Delivery[TestConsumer.Job]].seqNr should ===(35)
      consumerController ! ConsumerController.Confirmed

      testKit.stop(consumerController)
    }
  }

}
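The next file exercises the ProducerController side with a DurableProducerQueue. For context, here is a minimal producer-side sketch; it is illustrative only and not part of the commit, and SketchProducer/WrappedRequestNext are made-up names. The producer only sends when the ProducerController signals demand with RequestNext, which is what the Request flow-control messages asserted in these specs drive.

// Illustrative producer-side sketch (not part of this commit).
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.delivery.ProducerController

object SketchProducer {
  final case class Job(payload: String)

  sealed trait Command
  private final case class WrappedRequestNext(next: ProducerController.RequestNext[Job]) extends Command

  def apply(producerController: ActorRef[ProducerController.Command[Job]]): Behavior[Command] =
    Behaviors.setup { context =>
      val requestNextAdapter: ActorRef[ProducerController.RequestNext[Job]] =
        context.messageAdapter[ProducerController.RequestNext[Job]](WrappedRequestNext(_))
      producerController ! ProducerController.Start(requestNextAdapter)

      var n = 0
      Behaviors.receiveMessage { case WrappedRequestNext(next) =>
        // demand from the ProducerController: send exactly one message per RequestNext
        n += 1
        next.sendNextTo ! Job(s"msg-$n")
        Behaviors.same
      }
    }
}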
@@ -0,0 +1,160 @@
/*
 * Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.actor.typed.delivery

import java.util.concurrent.atomic.AtomicReference

import scala.concurrent.duration._

import akka.actor.testkit.typed.scaladsl.LogCapturing
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import DurableProducerQueue.MessageSent
import ProducerController.MessageWithConfirmation
import akka.actor.typed.delivery.internal.ProducerControllerImpl
import org.scalatest.wordspec.AnyWordSpecLike

class DurableProducerControllerSpec
    extends ScalaTestWithActorTestKit("""
  akka.reliable-delivery.consumer-controller.flow-control-window = 20
  """)
    with AnyWordSpecLike
    with LogCapturing {
  import TestConsumer.sequencedMessage
  import DurableProducerQueue.NoQualifier
  import TestDurableProducerQueue.TestTimestamp

  private var idCount = 0
  private def nextId(): Int = {
    idCount += 1
    idCount
  }

  private def producerId: String = s"p-$idCount"

  "ProducerController with durable queue" must {

    "load initial state and resend unconfirmed" in {
      nextId()
      val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()

      val durable = TestDurableProducerQueue[TestConsumer.Job](
        Duration.Zero,
        DurableProducerQueue.State(
          currentSeqNr = 5,
          highestConfirmedSeqNr = 2,
          confirmedSeqNr = Map(NoQualifier -> (2L -> TestTimestamp)),
          unconfirmed = Vector(
            DurableProducerQueue.MessageSent(3, TestConsumer.Job("msg-3"), false, NoQualifier, TestTimestamp),
            DurableProducerQueue.MessageSent(4, TestConsumer.Job("msg-4"), false, NoQualifier, TestTimestamp))))

      val producerController =
        spawn(ProducerController[TestConsumer.Job](producerId, Some(durable)), s"producerController-${idCount}")
          .unsafeUpcast[ProducerControllerImpl.InternalCommand]
      val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
      producerController ! ProducerController.Start(producerProbe.ref)

      producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)

      // no request to producer since it has unconfirmed to begin with
      producerProbe.expectNoMessage()

      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController).asFirst)
      consumerControllerProbe.expectNoMessage(50.millis)
      producerController ! ProducerControllerImpl.Request(3L, 13L, true, false)
      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))

      val sendTo = producerProbe.receiveMessage().sendNextTo
      sendTo ! TestConsumer.Job("msg-5")
      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 5, producerController))

      testKit.stop(producerController)
    }

    "store confirmations" in {
      nextId()
      val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()

      val stateHolder =
        new AtomicReference[DurableProducerQueue.State[TestConsumer.Job]](DurableProducerQueue.State.empty)
      val durable = TestDurableProducerQueue[TestConsumer.Job](
        Duration.Zero,
        stateHolder,
        (_: DurableProducerQueue.Command[_]) => false)

      val producerController =
        spawn(ProducerController[TestConsumer.Job](producerId, Some(durable)), s"producerController-${idCount}")
          .unsafeUpcast[ProducerControllerImpl.InternalCommand]
      val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
      producerController ! ProducerController.Start(producerProbe.ref)

      producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
      producerProbe.awaitAssert {
        stateHolder.get() should ===(
          DurableProducerQueue.State(
            2,
            0,
            Map.empty,
            Vector(MessageSent(1, TestConsumer.Job("msg-1"), ack = false, NoQualifier, TestTimestamp))))
      }
      producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
      producerProbe.awaitAssert {
        stateHolder.get() should ===(
          DurableProducerQueue.State(2, 1, Map(NoQualifier -> (1L -> TestTimestamp)), Vector.empty))
      }

      val replyTo = createTestProbe[Long]()
      producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyTo.ref)
      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController, ack = true))
      producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-3"), replyTo.ref)
      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController, ack = true))
      producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyTo.ref)
      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController, ack = true))
      producerController ! ProducerControllerImpl.Ack(3)
      producerProbe.awaitAssert {
        stateHolder.get() should ===(
          DurableProducerQueue.State(
            5,
            3,
            Map(NoQualifier -> (3L -> TestTimestamp)),
            Vector(MessageSent(4, TestConsumer.Job("msg-4"), ack = true, NoQualifier, TestTimestamp))))
      }

      testKit.stop(producerController)
    }

    "reply to MessageWithConfirmation after storage" in {
      nextId()
      val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()

      val durable =
        TestDurableProducerQueue[TestConsumer.Job](Duration.Zero, DurableProducerQueue.State.empty[TestConsumer.Job])

      val producerController =
        spawn(ProducerController[TestConsumer.Job](producerId, Some(durable)), s"producerController-${idCount}")
          .unsafeUpcast[ProducerControllerImpl.InternalCommand]
      val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
      producerController ! ProducerController.Start(producerProbe.ref)

      producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)

      val replyTo = createTestProbe[Long]()

      producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-1"), replyTo.ref)
      replyTo.expectMessage(1L)

      consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController, ack = true))
      producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)

      producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyTo.ref)
      replyTo.expectMessage(2L)

      testKit.stop(producerController)
    }
  }

}
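The next file covers the work-pulling variant with a durable queue. For context, here is a sketch of the wiring it assumes: the producer side spawns a WorkPullingProducerController for a ServiceKey, and each worker registers its ConsumerController with the Receptionist under that key (the spec does this with test probes). The sketch is illustrative only; WorkPullingSketch and the names inside it are made up.

// Illustrative work-pulling sketch (not part of this commit).
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.delivery.{ ConsumerController, WorkPullingProducerController }
import akka.actor.typed.receptionist.ServiceKey

object WorkPullingSketch {
  final case class Job(payload: String)

  // workers register their ConsumerController with the Receptionist under this key
  val workerServiceKey: ServiceKey[ConsumerController.Command[Job]] = ServiceKey("job-worker")

  def producerSide(producerId: String): Behavior[WorkPullingProducerController.RequestNext[Job]] =
    Behaviors.setup { context =>
      // None = no durable queue in this sketch; the spec below passes Some(durable) instead
      val controller = context.spawn(
        WorkPullingProducerController[Job](producerId, workerServiceKey, None),
        "workPullingController")
      controller ! WorkPullingProducerController.Start(context.self)

      var n = 0
      Behaviors.receiveMessage { next =>
        // one message per demand signal, routed to whichever registered worker has capacity
        n += 1
        next.sendNextTo ! Job(s"job-$n")
        Behaviors.same
      }
    }
}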
@@ -0,0 +1,395 @@
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import akka.actor.typed.ActorRef
|
||||
import DurableProducerQueue.MessageSent
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.receptionist.Receptionist
|
||||
import akka.actor.typed.receptionist.ServiceKey
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
class DurableWorkPullingSpec
|
||||
extends ScalaTestWithActorTestKit("""
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
import DurableProducerQueue.NoQualifier
|
||||
import TestDurableProducerQueue.TestTimestamp
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
private def producerId: String = s"p-$idCount"
|
||||
|
||||
private def awaitWorkersRegistered(
|
||||
controller: ActorRef[WorkPullingProducerController.Command[TestConsumer.Job]],
|
||||
count: Int): Unit = {
|
||||
val probe = createTestProbe[WorkPullingProducerController.WorkerStats]()
|
||||
probe.awaitAssert {
|
||||
controller ! WorkPullingProducerController.GetWorkerStats(probe.ref)
|
||||
probe.receiveMessage().numberOfWorkers should ===(count)
|
||||
}
|
||||
}
|
||||
|
||||
val workerServiceKey: ServiceKey[ConsumerController.Command[TestConsumer.Job]] = ServiceKey("worker")
|
||||
|
||||
// don't compare the UUID fields
|
||||
private def assertState(
|
||||
s: DurableProducerQueue.State[TestConsumer.Job],
|
||||
expected: DurableProducerQueue.State[TestConsumer.Job]): Unit = {
|
||||
|
||||
def cleanup(a: DurableProducerQueue.State[TestConsumer.Job]) =
|
||||
a.copy(
|
||||
confirmedSeqNr = Map.empty,
|
||||
unconfirmed = s.unconfirmed.map(m => m.copy(confirmationQualifier = DurableProducerQueue.NoQualifier)))
|
||||
|
||||
cleanup(s) should ===(cleanup(expected))
|
||||
}
|
||||
|
||||
"ReliableDelivery with work-pulling and durable queue" must {
|
||||
|
||||
"load initial state and resend unconfirmed" in {
|
||||
nextId()
|
||||
|
||||
val durable = TestDurableProducerQueue[TestConsumer.Job](
|
||||
Duration.Zero,
|
||||
DurableProducerQueue.State(
|
||||
currentSeqNr = 5,
|
||||
highestConfirmedSeqNr = 2,
|
||||
confirmedSeqNr = Map(NoQualifier -> (2L -> TestTimestamp)),
|
||||
unconfirmed = Vector(
|
||||
DurableProducerQueue.MessageSent(3, TestConsumer.Job("msg-3"), false, NoQualifier, TestTimestamp),
|
||||
DurableProducerQueue.MessageSent(4, TestConsumer.Job("msg-4"), false, NoQualifier, TestTimestamp))))
|
||||
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, Some(durable)),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
// no request to producer since it has unconfirmed to begin with
|
||||
producerProbe.expectNoMessage()
|
||||
|
||||
val seqMsg3 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg3.message should ===(TestConsumer.Job("msg-3"))
|
||||
seqMsg3.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
workerController1Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-4"))
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-5")
|
||||
|
||||
workerController1Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation after storage" in {
|
||||
import WorkPullingProducerController.MessageWithConfirmation
|
||||
nextId()
|
||||
val durable =
|
||||
TestDurableProducerQueue[TestConsumer.Job](Duration.Zero, DurableProducerQueue.State.empty[TestConsumer.Job])
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, Some(durable)),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
val replyProbe = createTestProbe[Done]()
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-1"), replyProbe.ref)
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.message should ===(TestConsumer.Job("msg-1"))
|
||||
seqMsg1.ack should ===(true)
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyProbe.ref)
|
||||
// reply after storage, doesn't wait for ack from consumer
|
||||
replyProbe.receiveMessage()
|
||||
val seqMsg2 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg2.message should ===(TestConsumer.Job("msg-2"))
|
||||
seqMsg2.ack should ===(true)
|
||||
seqMsg2.producerController ! ProducerControllerImpl.Ack(2L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-3"), replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyProbe.ref)
|
||||
replyProbe.receiveMessages(2)
|
||||
workerController1Probe.receiveMessages(2)
|
||||
seqMsg2.producerController ! ProducerControllerImpl.Ack(4L)
|
||||
|
||||
workerController1Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"store confirmations" in {
|
||||
import WorkPullingProducerController.MessageWithConfirmation
|
||||
nextId()
|
||||
|
||||
val stateHolder =
|
||||
new AtomicReference[DurableProducerQueue.State[TestConsumer.Job]](DurableProducerQueue.State.empty)
|
||||
val durable = TestDurableProducerQueue[TestConsumer.Job](
|
||||
Duration.Zero,
|
||||
stateHolder,
|
||||
(_: DurableProducerQueue.Command[_]) => false)
|
||||
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, Some(durable)),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
2,
|
||||
0,
|
||||
Map.empty,
|
||||
Vector(MessageSent(1, TestConsumer.Job("msg-1"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.message should ===(TestConsumer.Job("msg-1"))
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 5L, true, false)
|
||||
producerProbe.awaitAssert {
|
||||
assertState(stateHolder.get(), DurableProducerQueue.State(2, 1, Map.empty, Vector.empty))
|
||||
}
|
||||
|
||||
val replyTo = createTestProbe[Done]()
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyTo.ref)
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyTo.ref)
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-5")
|
||||
workerController1Probe.receiveMessage() // msg-2
|
||||
workerController1Probe.receiveMessage() // msg-3
|
||||
workerController1Probe.receiveMessage() // msg-4
|
||||
val seqMsg5 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg5.seqNr should ===(5)
|
||||
|
||||
// no more demand, since 5 messages sent but no Ack
|
||||
producerProbe.expectNoMessage()
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
6,
|
||||
1,
|
||||
Map.empty,
|
||||
Vector(
|
||||
MessageSent(2, TestConsumer.Job("msg-2"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(3, TestConsumer.Job("msg-3"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(4, TestConsumer.Job("msg-4"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(5, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
|
||||
// start another worker
|
||||
val workerController2Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController2Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 2)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-6")
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
7,
|
||||
1,
|
||||
Map.empty,
|
||||
Vector(
|
||||
MessageSent(2, TestConsumer.Job("msg-2"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(3, TestConsumer.Job("msg-3"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(4, TestConsumer.Job("msg-4"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(5, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(6, TestConsumer.Job("msg-6"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
val seqMsg6 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg6.message should ===(TestConsumer.Job("msg-6"))
|
||||
seqMsg6.seqNr should ===(1) // different ProducerController-ConsumerController
|
||||
seqMsg6.producerController ! ProducerControllerImpl.Request(1L, 5L, true, false)
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
7,
|
||||
6,
|
||||
Map.empty,
|
||||
Vector(
|
||||
MessageSent(2, TestConsumer.Job("msg-2"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(3, TestConsumer.Job("msg-3"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(4, TestConsumer.Job("msg-4"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(5, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Ack(3)
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
7,
|
||||
6,
|
||||
Map.empty,
|
||||
Vector(
|
||||
MessageSent(4, TestConsumer.Job("msg-4"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(5, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
|
||||
workerController1Probe.stop()
|
||||
workerController2Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"hand over, and resend unconfirmed when worker is unregistered" in {
|
||||
nextId()
|
||||
|
||||
val stateHolder =
|
||||
new AtomicReference[DurableProducerQueue.State[TestConsumer.Job]](DurableProducerQueue.State.empty)
|
||||
val durable = TestDurableProducerQueue[TestConsumer.Job](
|
||||
Duration.Zero,
|
||||
stateHolder,
|
||||
(_: DurableProducerQueue.Command[_]) => false)
|
||||
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, Some(durable)),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
2,
|
||||
0,
|
||||
Map.empty,
|
||||
Vector(MessageSent(1, TestConsumer.Job("msg-1"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.message should ===(TestConsumer.Job("msg-1"))
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 5L, true, false)
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-5")
|
||||
workerController1Probe.receiveMessage() // msg-2
|
||||
workerController1Probe.receiveMessage() // msg-3
|
||||
workerController1Probe.receiveMessage() // msg-4
|
||||
workerController1Probe.receiveMessage() // msg-5
|
||||
|
||||
// no more demand, since 5 messages sent but no Ack
|
||||
producerProbe.expectNoMessage()
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
6,
|
||||
1,
|
||||
Map.empty,
|
||||
Vector(
|
||||
MessageSent(2, TestConsumer.Job("msg-2"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(3, TestConsumer.Job("msg-3"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(4, TestConsumer.Job("msg-4"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(5, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
|
||||
// start another worker
|
||||
val workerController2Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController2Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 2)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-6")
|
||||
val seqMsg6 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg6.message should ===(TestConsumer.Job("msg-6"))
|
||||
// note that it's only requesting 3
|
||||
seqMsg6.producerController ! ProducerControllerImpl.Request(1L, 3L, true, false)
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
7,
|
||||
6,
|
||||
Map.empty,
|
||||
Vector(
|
||||
MessageSent(2, TestConsumer.Job("msg-2"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(3, TestConsumer.Job("msg-3"), ack = false, NoQualifier, TestTimestamp),
|
||||
MessageSent(4, TestConsumer.Job("msg-4"), ack = true, NoQualifier, TestTimestamp),
|
||||
MessageSent(5, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
|
||||
workerController1Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
// msg-2, msg-3, msg-4, msg-5 were originally sent to worker1, but not confirmed
|
||||
// so they will be resent and delivered to worker2
|
||||
val seqMsg7 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg7.message should ===(TestConsumer.Job("msg-2"))
|
||||
val seqMsg8 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg8.message should ===(TestConsumer.Job("msg-3"))
|
||||
seqMsg8.seqNr should ===(3)
|
||||
// but it has only requested 3 so no more
|
||||
workerController2Probe.expectNoMessage()
|
||||
// then request more, and confirm 3
|
||||
seqMsg8.producerController ! ProducerControllerImpl.Request(3L, 10L, true, false)
|
||||
val seqMsg9 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg9.message should ===(TestConsumer.Job("msg-4"))
|
||||
val seqMsg10 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg10.message should ===(TestConsumer.Job("msg-5"))
|
||||
|
||||
seqMsg9.producerController ! ProducerControllerImpl.Ack(seqMsg9.seqNr)
|
||||
producerProbe.awaitAssert {
|
||||
assertState(
|
||||
stateHolder.get(),
|
||||
DurableProducerQueue.State(
|
||||
11,
|
||||
9,
|
||||
Map.empty,
|
||||
Vector(
|
||||
// note that msg-5 now has a different seqNr than before (10 instead of 5)
|
||||
MessageSent(10, TestConsumer.Job("msg-5"), ack = false, NoQualifier, TestTimestamp))))
|
||||
}
|
||||
|
||||
workerController1Probe.stop()
|
||||
workerController2Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,333 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import ProducerController.MessageWithConfirmation
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
class ProducerControllerSpec
|
||||
extends ScalaTestWithActorTestKit("""
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
import TestConsumer.sequencedMessage
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
private def producerId: String = s"p-$idCount"
|
||||
|
||||
"ProducerController" must {
|
||||
|
||||
"resend lost initial SequencedMessage" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
val sendTo = producerProbe.receiveMessage().sendNextTo
|
||||
sendTo ! TestConsumer.Job("msg-1")
|
||||
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
|
||||
// the ConsumerController will send the initial `Request` back, but if that is lost, or if the first
|
||||
// `SequencedMessage` is lost, the ProducerController will resend the SequencedMessage
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
|
||||
val internalProducerController = producerController.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
internalProducerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
consumerControllerProbe.expectNoMessage(1100.millis)
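// once a Request has been received the ProducerController stops resending the first SequencedMessage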
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
"resend lost SequencedMessage when receiving Resend" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
// let's say 3 is lost; when 4 is received the ConsumerController detects the gap and sends Resend(3)
|
||||
producerController ! ProducerControllerImpl.Resend(3)
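// Resend(3) asks the ProducerController to redeliver everything from seqNr 3 that is still unconfirmed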
|
||||
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-5")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 5, producerController))
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
"resend last lost SequencedMessage when receiving Request" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
// let's say 3 and 4 are lost, and no more messages are sent from the producer
|
||||
// ConsumerController will resend Request periodically
|
||||
producerController ! ProducerControllerImpl.Request(2L, 10L, true, true)
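// the final parameter (viaTimeout = true) marks this as a periodic retry, which also triggers resend of the unconfirmed 3 and 4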
|
||||
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-5")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 5, producerController))
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
"support registration of new ConsumerController" in {
|
||||
nextId()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
val consumerControllerProbe1 = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe1.ref)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
consumerControllerProbe1.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
|
||||
val consumerControllerProbe2 = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe2.ref)
|
||||
|
||||
consumerControllerProbe2.expectMessage(sequencedMessage(producerId, 2, producerController).asFirst)
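// the unconfirmed msg-2 is redelivered to the new ConsumerController, flagged as the first message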
|
||||
consumerControllerProbe2.expectNoMessage(100.millis)
|
||||
// if no Request confirms the first (seqNr=2) it will be resent
|
||||
consumerControllerProbe2.expectMessage(sequencedMessage(producerId, 2, producerController).asFirst)
|
||||
|
||||
producerController ! ProducerControllerImpl.Request(2L, 10L, true, false)
|
||||
// then the other unconfirmed messages should be resent
|
||||
consumerControllerProbe2.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
consumerControllerProbe2.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
val replyTo = createTestProbe[Long]()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-1"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController, ack = true))
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
replyTo.expectMessage(1L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController, ack = true))
|
||||
producerController ! ProducerControllerImpl.Ack(2L)
|
||||
replyTo.expectMessage(2L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-3"), replyTo.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController, ack = true))
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController, ack = true))
|
||||
// Ack(3) lost, but Ack(4) triggers the replies for 3 and 4
|
||||
producerController ! ProducerControllerImpl.Ack(4L)
|
||||
replyTo.expectMessage(3L)
|
||||
replyTo.expectMessage(4L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-5"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 5, producerController, ack = true))
|
||||
// Ack(5) lost, but eventually a Request will trigger the reply
|
||||
producerController ! ProducerControllerImpl.Request(5L, 15L, true, false)
|
||||
replyTo.expectMessage(5L)
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
"allow restart of producer" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe1 = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe1.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
producerProbe1.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
producerProbe1.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController))
|
||||
|
||||
producerProbe1.receiveMessage().currentSeqNr should ===(3)
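// RequestNext.currentSeqNr is the seqNr that will be used for the next message, i.e. msg-3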
|
||||
|
||||
// restart producer, new Start
|
||||
val producerProbe2 = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe2.ref)
|
||||
|
||||
producerProbe2.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
|
||||
producerProbe2.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
"ProducerController without resends" must {
|
||||
"not resend last lost SequencedMessage when receiving Request" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController))
|
||||
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, supportResend = false, false)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController))
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController))
|
||||
|
||||
// let's say 3 and 4 are lost, and no more messages are sent from the producer
|
||||
// ConsumerController will resend Request periodically
|
||||
producerController ! ProducerControllerImpl.Request(2L, 10L, supportResend = false, true)
|
||||
|
||||
// but 3 and 4 are not resent because supportResend = false
|
||||
consumerControllerProbe.expectNoMessage()
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-5")
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 5, producerController))
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation for lost messages" in {
|
||||
nextId()
|
||||
val consumerControllerProbe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](producerId, None), s"producerController-${idCount}")
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
producerController ! ProducerController.RegisterConsumer(consumerControllerProbe.ref)
|
||||
|
||||
val replyTo = createTestProbe[Long]()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-1"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 1, producerController, ack = true))
|
||||
producerController ! ProducerControllerImpl.Request(1L, 10L, supportResend = false, false)
|
||||
replyTo.expectMessage(1L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 2, producerController, ack = true))
|
||||
producerController ! ProducerControllerImpl.Ack(2L)
|
||||
replyTo.expectMessage(2L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-3"), replyTo.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 3, producerController, ack = true))
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 4, producerController, ack = true))
|
||||
// Ack(3) lost, but Ack(4) triggers the replies for 3 and 4
|
||||
producerController ! ProducerControllerImpl.Ack(4L)
|
||||
replyTo.expectMessage(3L)
|
||||
replyTo.expectMessage(4L)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-5"), replyTo.ref)
|
||||
consumerControllerProbe.expectMessage(sequencedMessage(producerId, 5, producerController, ack = true))
|
||||
// Ack(5) lost, but eventually a Request will trigger the reply
|
||||
producerController ! ProducerControllerImpl.Request(5L, 15L, supportResend = false, false)
|
||||
replyTo.expectMessage(5L)
|
||||
|
||||
testKit.stop(producerController)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,200 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference
|
||||
|
||||
import scala.concurrent.duration._
|
||||
import scala.util.Random
|
||||
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.BehaviorInterceptor
|
||||
import akka.actor.typed.TypedActorContext
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
object ReliableDeliveryRandomSpec {
|
||||
object RandomFlakyNetwork {
|
||||
def apply[T](rnd: Random, dropProbability: Any => Double): BehaviorInterceptor[T, T] =
|
||||
new RandomFlakyNetwork(rnd, dropProbability).asInstanceOf[BehaviorInterceptor[T, T]]
|
||||
}
|
||||
|
||||
class RandomFlakyNetwork(rnd: Random, dropProbability: Any => Double) extends BehaviorInterceptor[Any, Any] {
|
||||
override def aroundReceive(
|
||||
ctx: TypedActorContext[Any],
|
||||
msg: Any,
|
||||
target: BehaviorInterceptor.ReceiveTarget[Any]): Behavior[Any] = {
|
||||
if (rnd.nextDouble() < dropProbability(msg)) {
|
||||
ctx.asScala.log.info("dropped {}", msg)
|
||||
Behaviors.same
|
||||
} else {
|
||||
target(ctx, msg)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
class ReliableDeliveryRandomSpec
|
||||
extends ScalaTestWithActorTestKit("""
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
import ReliableDeliveryRandomSpec._
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
private def producerId: String = s"p-$idCount"
|
||||
|
||||
private def test(
|
||||
rndSeed: Long,
|
||||
rnd: Random,
|
||||
numberOfMessages: Int,
|
||||
producerDropProbability: Double,
|
||||
consumerDropProbability: Double,
|
||||
durableFailProbability: Option[Double],
|
||||
resendLost: Boolean): Unit = {
|
||||
|
||||
val consumerControllerSettings = ConsumerController.Settings(system).withOnlyFlowControl(!resendLost)
|
||||
|
||||
val consumerDelay = rnd.nextInt(40).millis
|
||||
val producerDelay = rnd.nextInt(40).millis
|
||||
val durableDelay = if (durableFailProbability.isDefined) rnd.nextInt(40).millis else Duration.Zero
|
||||
system.log.infoN(
|
||||
"Random seed [{}], consumerDropProbability [{}], producerDropProbability [{}], " +
|
||||
"consumerDelay [{}], producerDelay [{}], durableFailProbability [{}], durableDelay [{}]",
|
||||
rndSeed,
|
||||
consumerDropProbability,
|
||||
producerDropProbability,
|
||||
consumerDelay,
|
||||
producerDelay,
|
||||
durableFailProbability,
|
||||
durableDelay)
|
||||
|
||||
// RandomFlakyNetwork to simulate lost messages from producerController to consumerController
|
||||
val consumerDrop: Any => Double = {
|
||||
case _: ConsumerController.SequencedMessage[_] => consumerDropProbability
|
||||
case _ => 0.0
|
||||
}
|
||||
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val consumerController =
|
||||
spawn(
|
||||
Behaviors.intercept(() => RandomFlakyNetwork[ConsumerController.Command[TestConsumer.Job]](rnd, consumerDrop))(
|
||||
ConsumerController[TestConsumer.Job](serviceKey = None, consumerControllerSettings)),
|
||||
s"consumerController-${idCount}")
|
||||
spawn(
|
||||
TestConsumer(consumerDelay, numberOfMessages, consumerEndProbe.ref, consumerController),
|
||||
name = s"destination-${idCount}")
|
||||
|
||||
// RandomFlakyNetwork to simulate lost messages from consumerController to producerController
|
||||
val producerDrop: Any => Double = {
|
||||
case _: ProducerControllerImpl.Request => producerDropProbability
|
||||
case _: ProducerControllerImpl.Resend => producerDropProbability
|
||||
case _: ProducerController.RegisterConsumer[_] => producerDropProbability
|
||||
case _ => 0.0
|
||||
}
|
||||
|
||||
val stateHolder = new AtomicReference[DurableProducerQueue.State[TestConsumer.Job]]
|
||||
val durableQueue = durableFailProbability.map { p =>
|
||||
TestDurableProducerQueue(
|
||||
durableDelay,
|
||||
stateHolder,
|
||||
(_: DurableProducerQueue.Command[TestConsumer.Job]) => rnd.nextDouble() < p)
|
||||
}
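// when failWhen returns true the TestDurableProducerQueue throws TestException and is restarted by its backoff supervision (see TestDurableProducerQueue below)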
|
||||
|
||||
val producerController = spawn(
|
||||
Behaviors.intercept(() => RandomFlakyNetwork[ProducerController.Command[TestConsumer.Job]](rnd, producerDrop))(
|
||||
ProducerController[TestConsumer.Job](producerId, durableQueue)),
|
||||
s"producerController-${idCount}")
|
||||
val producer = spawn(TestProducer(producerDelay, producerController), name = s"producer-${idCount}")
|
||||
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
consumerEndProbe.receiveMessage(120.seconds)
|
||||
|
||||
testKit.stop(producer)
|
||||
testKit.stop(producerController)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
"ReliableDelivery with random failures" must {
|
||||
|
||||
"work with flaky network" in {
|
||||
nextId()
|
||||
val rndSeed = System.currentTimeMillis()
|
||||
val rnd = new Random(rndSeed)
|
||||
val consumerDropProbability = 0.1 + rnd.nextDouble() * 0.4
|
||||
val producerDropProbability = 0.1 + rnd.nextDouble() * 0.3
|
||||
test(
|
||||
rndSeed,
|
||||
rnd,
|
||||
numberOfMessages = 63,
|
||||
producerDropProbability,
|
||||
consumerDropProbability,
|
||||
durableFailProbability = None,
|
||||
resendLost = true)
|
||||
}
|
||||
|
||||
"work with flaky DurableProducerQueue" in {
|
||||
nextId()
|
||||
val rndSeed = System.currentTimeMillis()
|
||||
val rnd = new Random(rndSeed)
|
||||
val durableFailProbability = 0.1 + rnd.nextDouble() * 0.2
|
||||
test(
|
||||
rndSeed,
|
||||
rnd,
|
||||
numberOfMessages = 31,
|
||||
producerDropProbability = 0.0,
|
||||
consumerDropProbability = 0.0,
|
||||
Some(durableFailProbability),
|
||||
resendLost = true)
|
||||
}
|
||||
|
||||
"work with flaky network and flaky DurableProducerQueue" in {
|
||||
nextId()
|
||||
val rndSeed = System.currentTimeMillis()
|
||||
val rnd = new Random(rndSeed)
|
||||
val consumerDropProbability = 0.1 + rnd.nextDouble() * 0.4
|
||||
val producerDropProbability = 0.1 + rnd.nextDouble() * 0.3
|
||||
val durableFailProbability = 0.1 + rnd.nextDouble() * 0.2
|
||||
test(
|
||||
rndSeed,
|
||||
rnd,
|
||||
numberOfMessages = 17,
|
||||
producerDropProbability,
|
||||
consumerDropProbability,
|
||||
Some(durableFailProbability),
|
||||
resendLost = true)
|
||||
}
|
||||
|
||||
"work with flaky network without resending" in {
|
||||
nextId()
|
||||
val rndSeed = System.currentTimeMillis()
|
||||
val rnd = new Random(rndSeed)
|
||||
val consumerDropProbability = 0.1 + rnd.nextDouble() * 0.4
|
||||
val producerDropProbability = 0.1 + rnd.nextDouble() * 0.3
|
||||
test(
|
||||
rndSeed,
|
||||
rnd,
|
||||
numberOfMessages = 63,
|
||||
producerDropProbability,
|
||||
consumerDropProbability,
|
||||
durableFailProbability = None,
|
||||
resendLost = false)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,190 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
class ReliableDeliverySpec
|
||||
extends ScalaTestWithActorTestKit("""
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
import TestConsumer.defaultConsumerDelay
|
||||
import TestProducer.defaultProducerDelay
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
"ReliableDelivery" must {
|
||||
|
||||
"illustrate point-to-point usage" in {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val consumerController =
|
||||
spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
|
||||
spawn(
|
||||
TestConsumer(defaultConsumerDelay, 42, consumerEndProbe.ref, consumerController),
|
||||
name = s"destination-${idCount}")
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](s"p-${idCount}", None), s"producerController-${idCount}")
|
||||
val producer = spawn(TestProducer(defaultProducerDelay, producerController), name = s"producer-${idCount}")
|
||||
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
consumerEndProbe.receiveMessage(5.seconds)
|
||||
|
||||
testKit.stop(producer)
|
||||
testKit.stop(producerController)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
"illustrate point-to-point usage with ask" in {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val consumerController =
|
||||
spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
|
||||
spawn(
|
||||
TestConsumer(defaultConsumerDelay, 42, consumerEndProbe.ref, consumerController),
|
||||
name = s"destination-${idCount}")
|
||||
|
||||
val replyProbe = createTestProbe[Long]()
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](s"p-${idCount}", None), s"producerController-${idCount}")
|
||||
val producer =
|
||||
spawn(
|
||||
TestProducerWithAsk(defaultProducerDelay, replyProbe.ref, producerController),
|
||||
name = s"producer-${idCount}")
|
||||
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
consumerEndProbe.receiveMessage(5.seconds)
|
||||
|
||||
replyProbe.receiveMessages(42, 5.seconds).toSet should ===((1L to 42L).toSet)
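// all 42 confirmed seqNrs arrive exactly once; the Set comparison makes the assertion independent of reply ordering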
|
||||
|
||||
testKit.stop(producer)
|
||||
testKit.stop(producerController)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
def testWithDelays(producerDelay: FiniteDuration, consumerDelay: FiniteDuration): Unit = {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val consumerController =
|
||||
spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
|
||||
spawn(TestConsumer(consumerDelay, 42, consumerEndProbe.ref, consumerController), name = s"destination-${idCount}")
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](s"p-${idCount}", None), s"producerController-${idCount}")
|
||||
val producer = spawn(TestProducer(producerDelay, producerController), name = s"producer-${idCount}")
|
||||
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
consumerEndProbe.receiveMessage(5.seconds)
|
||||
|
||||
testKit.stop(producer)
|
||||
testKit.stop(producerController)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
"work with slow producer and fast consumer" in {
|
||||
testWithDelays(producerDelay = 30.millis, consumerDelay = Duration.Zero)
|
||||
}
|
||||
|
||||
"work with fast producer and slow consumer" in {
|
||||
testWithDelays(producerDelay = Duration.Zero, consumerDelay = 30.millis)
|
||||
}
|
||||
|
||||
"work with fast producer and fast consumer" in {
|
||||
testWithDelays(producerDelay = Duration.Zero, consumerDelay = Duration.Zero)
|
||||
}
|
||||
|
||||
"allow replacement of destination" in {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val consumerController =
|
||||
spawn(ConsumerController[TestConsumer.Job](), s"consumerController1-${idCount}")
|
||||
spawn(TestConsumer(defaultConsumerDelay, 42, consumerEndProbe.ref, consumerController), s"consumer1-${idCount}")
|
||||
|
||||
val producerController =
|
||||
spawn(ProducerController[TestConsumer.Job](s"p-${idCount}", None), s"producerController-${idCount}")
|
||||
val producer = spawn(TestProducer(defaultProducerDelay, producerController), name = s"producer-${idCount}")
|
||||
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
consumerEndProbe.receiveMessage(5.seconds)
|
||||
|
||||
val consumerEndProbe2 = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val consumerController2 =
|
||||
spawn(ConsumerController[TestConsumer.Job](), s"consumerController2-${idCount}")
|
||||
spawn(TestConsumer(defaultConsumerDelay, 42, consumerEndProbe2.ref, consumerController2), s"consumer2-${idCount}")
|
||||
consumerController2 ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
consumerEndProbe2.receiveMessage(5.seconds)
|
||||
|
||||
testKit.stop(producer)
|
||||
testKit.stop(producerController)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
"allow replacement of producer" in {
|
||||
nextId()
|
||||
|
||||
val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()
|
||||
val consumerController =
|
||||
spawn(ConsumerController[TestConsumer.Job](), s"consumerController-${idCount}")
|
||||
consumerController ! ConsumerController.Start(consumerProbe.ref)
|
||||
|
||||
val producerController1 =
|
||||
spawn(ProducerController[TestConsumer.Job](s"p-${idCount}", None), s"producerController1-${idCount}")
|
||||
val producerProbe1 = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController1 ! ProducerController.Start(producerProbe1.ref)
|
||||
|
||||
producerController1 ! ProducerController.RegisterConsumer(consumerController)
|
||||
|
||||
producerProbe1.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
val delivery1 = consumerProbe.receiveMessage()
|
||||
delivery1.message should ===(TestConsumer.Job("msg-1"))
|
||||
delivery1.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
producerProbe1.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
val delivery2 = consumerProbe.receiveMessage()
|
||||
delivery2.message should ===(TestConsumer.Job("msg-2"))
|
||||
delivery2.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
// replace producer
|
||||
testKit.stop(producerController1)
|
||||
val producerController2 =
|
||||
spawn(ProducerController[TestConsumer.Job](s"p-${idCount}", None), s"producerController2-${idCount}")
|
||||
val producerProbe2 = createTestProbe[ProducerController.RequestNext[TestConsumer.Job]]()
|
||||
producerController2 ! ProducerController.Start(producerProbe2.ref)
|
||||
producerController2 ! ProducerController.RegisterConsumer(consumerController)
|
||||
|
||||
producerProbe2.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
val delivery3 = consumerProbe.receiveMessage()
|
||||
delivery3.message should ===(TestConsumer.Job("msg-3"))
|
||||
delivery3.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
producerProbe2.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
val delivery4 = consumerProbe.receiveMessage()
|
||||
delivery4.message should ===(TestConsumer.Job("msg-4"))
|
||||
delivery4.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
testKit.stop(producerController2)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import ConsumerController.SequencedMessage
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
|
||||
object TestConsumer {
|
||||
|
||||
final case class Job(payload: String)
|
||||
sealed trait Command
|
||||
final case class JobDelivery(
|
||||
msg: Job,
|
||||
confirmTo: ActorRef[ConsumerController.Confirmed],
|
||||
producerId: String,
|
||||
seqNr: Long)
|
||||
extends Command
|
||||
final case class SomeAsyncJob(
|
||||
msg: Job,
|
||||
confirmTo: ActorRef[ConsumerController.Confirmed],
|
||||
producerId: String,
|
||||
seqNr: Long)
|
||||
extends Command
|
||||
|
||||
final case class CollectedProducerIds(producerIds: Set[String])
|
||||
|
||||
val defaultConsumerDelay: FiniteDuration = 10.millis
|
||||
|
||||
def sequencedMessage(
|
||||
producerId: String,
|
||||
n: Long,
|
||||
producerController: ActorRef[ProducerController.Command[TestConsumer.Job]],
|
||||
ack: Boolean = false): SequencedMessage[TestConsumer.Job] = {
|
||||
ConsumerController.SequencedMessage(producerId, n, TestConsumer.Job(s"msg-$n"), first = n == 1, ack)(
|
||||
producerController.unsafeUpcast[ProducerControllerImpl.InternalCommand])
|
||||
}
|
||||
|
||||
def consumerEndCondition(seqNr: Long): TestConsumer.SomeAsyncJob => Boolean = {
|
||||
case TestConsumer.SomeAsyncJob(_, _, _, nr) => nr >= seqNr
|
||||
}
|
||||
|
||||
def apply(
|
||||
delay: FiniteDuration,
|
||||
endSeqNr: Long,
|
||||
endReplyTo: ActorRef[CollectedProducerIds],
|
||||
controller: ActorRef[ConsumerController.Start[TestConsumer.Job]]): Behavior[Command] =
|
||||
apply(delay, consumerEndCondition(endSeqNr), endReplyTo, controller)
|
||||
|
||||
def apply(
|
||||
delay: FiniteDuration,
|
||||
endCondition: SomeAsyncJob => Boolean,
|
||||
endReplyTo: ActorRef[CollectedProducerIds],
|
||||
controller: ActorRef[ConsumerController.Start[TestConsumer.Job]]): Behavior[Command] =
|
||||
Behaviors.setup[Command] { ctx =>
|
||||
new TestConsumer(ctx, delay, endCondition, endReplyTo, controller).active(Set.empty)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class TestConsumer(
|
||||
ctx: ActorContext[TestConsumer.Command],
|
||||
delay: FiniteDuration,
|
||||
endCondition: TestConsumer.SomeAsyncJob => Boolean,
|
||||
endReplyTo: ActorRef[TestConsumer.CollectedProducerIds],
|
||||
controller: ActorRef[ConsumerController.Start[TestConsumer.Job]]) {
|
||||
import TestConsumer._
|
||||
|
||||
ctx.setLoggerName("TestConsumer")
|
||||
|
||||
private val deliverTo: ActorRef[ConsumerController.Delivery[Job]] =
|
||||
ctx.messageAdapter(d => JobDelivery(d.message, d.confirmTo, d.producerId, d.seqNr))
|
||||
|
||||
controller ! ConsumerController.Start(deliverTo)
|
||||
|
||||
private def active(processed: Set[(String, Long)]): Behavior[Command] = {
|
||||
Behaviors.receive { (ctx, m) =>
|
||||
m match {
|
||||
case JobDelivery(msg, confirmTo, producerId, seqNr) =>
|
||||
// the confirmation can be done later, asynchronously
|
||||
if (delay == Duration.Zero)
|
||||
ctx.self ! SomeAsyncJob(msg, confirmTo, producerId, seqNr)
|
||||
else
|
||||
// schedule to simulate slow consumer
|
||||
ctx.scheduleOnce(10.millis, ctx.self, SomeAsyncJob(msg, confirmTo, producerId, seqNr))
|
||||
Behaviors.same
|
||||
|
||||
case job @ SomeAsyncJob(_, confirmTo, producerId, seqNr) =>
|
||||
// when replacing the producer the seqNr may start from 1 again
|
||||
val cleanProcessed =
|
||||
if (seqNr == 1L) processed.filterNot { case (pid, _) => pid == producerId } else processed
|
||||
|
||||
if (cleanProcessed((producerId, seqNr)))
|
||||
throw new RuntimeException(s"Received duplicate [($producerId,$seqNr)]")
|
||||
ctx.log.info("processed [{}] from [{}]", seqNr, producerId)
|
||||
confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
if (endCondition(job)) {
|
||||
endReplyTo ! CollectedProducerIds(processed.map(_._1))
|
||||
Behaviors.stopped
|
||||
} else
|
||||
active(cleanProcessed + (producerId -> seqNr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,112 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference
|
||||
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import akka.actor.testkit.typed.TestException
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.SupervisorStrategy
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
|
||||
object TestDurableProducerQueue {
|
||||
import DurableProducerQueue._
|
||||
def apply[A](
|
||||
delay: FiniteDuration,
|
||||
stateHolder: AtomicReference[State[A]],
|
||||
failWhen: Command[A] => Boolean): Behavior[Command[A]] = {
|
||||
if (stateHolder.get() eq null)
|
||||
stateHolder.set(State(1L, 0L, Map.empty, Vector.empty))
|
||||
|
||||
Behaviors
|
||||
.supervise {
|
||||
Behaviors.setup[Command[A]] { context =>
|
||||
context.setLoggerName("TestDurableProducerQueue")
|
||||
val state = stateHolder.get()
|
||||
context.log.info("Starting with seqNr [{}], confirmedSeqNr [{}]", state.currentSeqNr, state.confirmedSeqNr)
|
||||
new TestDurableProducerQueue[A](context, delay, stateHolder, failWhen).active(state)
|
||||
}
|
||||
}
|
||||
.onFailure(SupervisorStrategy.restartWithBackoff(delay, delay, 0.0))
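// restart with a fixed backoff equal to the simulated delay (minBackoff = maxBackoff = delay, randomFactor = 0.0)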
|
||||
}
|
||||
|
||||
def apply[A](delay: FiniteDuration, state: State[A]): Behavior[Command[A]] = {
|
||||
apply(delay, new AtomicReference(state), _ => false)
|
||||
}
|
||||
|
||||
// using a fixed timestamp to simplify tests, not using the timestamps in the commands
|
||||
val TestTimestamp: DurableProducerQueue.TimestampMillis = Long.MaxValue
|
||||
|
||||
}
|
||||
|
||||
class TestDurableProducerQueue[A](
|
||||
context: ActorContext[DurableProducerQueue.Command[A]],
|
||||
delay: FiniteDuration,
|
||||
stateHolder: AtomicReference[DurableProducerQueue.State[A]],
|
||||
failWhen: DurableProducerQueue.Command[A] => Boolean) {
|
||||
import DurableProducerQueue._
|
||||
import TestDurableProducerQueue.TestTimestamp
|
||||
|
||||
private def active(state: State[A]): Behavior[Command[A]] = {
|
||||
stateHolder.set(state)
|
||||
Behaviors.receiveMessage {
|
||||
case cmd: LoadState[A] @unchecked =>
|
||||
maybeFail(cmd)
|
||||
if (delay == Duration.Zero) cmd.replyTo ! state else context.scheduleOnce(delay, cmd.replyTo, state)
|
||||
Behaviors.same
|
||||
|
||||
case cmd: StoreMessageSent[A] @unchecked =>
|
||||
if (cmd.sent.seqNr == state.currentSeqNr) {
|
||||
context.log.info(
|
||||
"StoreMessageSent seqNr [{}], confirmationQualifier [{}]",
|
||||
cmd.sent.seqNr,
|
||||
cmd.sent.confirmationQualifier)
|
||||
maybeFail(cmd)
|
||||
val reply = StoreMessageSentAck(cmd.sent.seqNr)
|
||||
if (delay == Duration.Zero) cmd.replyTo ! reply else context.scheduleOnce(delay, cmd.replyTo, reply)
|
||||
active(
|
||||
state.copy(
|
||||
currentSeqNr = cmd.sent.seqNr + 1,
|
||||
unconfirmed = state.unconfirmed :+ cmd.sent.copy(timestampMillis = TestTimestamp)))
|
||||
} else if (cmd.sent.seqNr == state.currentSeqNr - 1) {
|
||||
// already stored, could be a retry after a timeout
|
||||
context.log.info("Duplicate seqNr [{}], currentSeqNr [{}]", cmd.sent.seqNr, state.currentSeqNr)
|
||||
val reply = StoreMessageSentAck(cmd.sent.seqNr)
|
||||
if (delay == Duration.Zero) cmd.replyTo ! reply else context.scheduleOnce(delay, cmd.replyTo, reply)
|
||||
Behaviors.same
|
||||
} else {
|
||||
// may happen after failure
|
||||
context.log.info("Ignoring unexpected seqNr [{}], currentSeqNr [{}]", cmd.sent.seqNr, state.currentSeqNr)
|
||||
Behaviors.unhandled // no reply, the request will time out
|
||||
}
|
||||
|
||||
case cmd: StoreMessageConfirmed[A] @unchecked =>
|
||||
context.log.info(
|
||||
"StoreMessageConfirmed seqNr [{}], confirmationQualifier [{}]",
|
||||
cmd.seqNr,
|
||||
cmd.confirmationQualifier)
|
||||
maybeFail(cmd)
|
||||
val newUnconfirmed = state.unconfirmed.filterNot { u =>
|
||||
u.confirmationQualifier == cmd.confirmationQualifier && u.seqNr <= cmd.seqNr
|
||||
}
|
||||
val newHighestConfirmed = math.max(state.highestConfirmedSeqNr, cmd.seqNr)
|
||||
active(
|
||||
state.copy(
|
||||
highestConfirmedSeqNr = newHighestConfirmed,
|
||||
confirmedSeqNr = state.confirmedSeqNr.updated(cmd.confirmationQualifier, (cmd.seqNr, TestTimestamp)),
|
||||
unconfirmed = newUnconfirmed))
|
||||
}
|
||||
}
|
||||
|
||||
private def maybeFail(cmd: Command[A]): Unit = {
|
||||
if (failWhen(cmd))
|
||||
throw TestException(s"TestDurableProducerQueue failed at [$cmd]")
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
|
||||
object TestProducer {
|
||||
|
||||
trait Command
|
||||
final case class RequestNext(sendTo: ActorRef[TestConsumer.Job]) extends Command
|
||||
private final case object Tick extends Command
|
||||
|
||||
val defaultProducerDelay: FiniteDuration = 20.millis
|
||||
|
||||
def apply(
|
||||
delay: FiniteDuration,
|
||||
producerController: ActorRef[ProducerController.Start[TestConsumer.Job]]): Behavior[Command] = {
|
||||
Behaviors.setup { context =>
|
||||
context.setLoggerName("TestProducer")
|
||||
val requestNextAdapter: ActorRef[ProducerController.RequestNext[TestConsumer.Job]] =
|
||||
context.messageAdapter(req => RequestNext(req.sendNextTo))
|
||||
producerController ! ProducerController.Start(requestNextAdapter)
|
||||
|
||||
if (delay == Duration.Zero)
|
||||
activeNoDelay(1) // simulate fast producer
|
||||
else {
|
||||
Behaviors.withTimers { timers =>
|
||||
timers.startTimerWithFixedDelay(Tick, Tick, delay)
|
||||
idle(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def idle(n: Int): Behavior[Command] = {
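// while there is no demand the periodic Tick is ignored; a RequestNext switches to active and the next Tick sends one message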
|
||||
Behaviors.receiveMessage {
|
||||
case Tick => Behaviors.same
|
||||
case RequestNext(sendTo) => active(n + 1, sendTo)
|
||||
}
|
||||
}
|
||||
|
||||
private def active(n: Int, sendTo: ActorRef[TestConsumer.Job]): Behavior[Command] = {
|
||||
Behaviors.receive { (ctx, msg) =>
|
||||
msg match {
|
||||
case Tick =>
|
||||
sendMessage(n, sendTo, ctx)
|
||||
idle(n)
|
||||
|
||||
case RequestNext(_) =>
|
||||
throw new IllegalStateException("Unexpected RequestNext, already got one.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def activeNoDelay(n: Int): Behavior[Command] = {
|
||||
Behaviors.receive { (ctx, msg) =>
|
||||
msg match {
|
||||
case RequestNext(sendTo) =>
|
||||
sendMessage(n, sendTo, ctx)
|
||||
activeNoDelay(n + 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def sendMessage(n: Int, sendTo: ActorRef[TestConsumer.Job], ctx: ActorContext[Command]): Unit = {
|
||||
val msg = s"msg-$n"
|
||||
ctx.log.info("sent {}", msg)
|
||||
sendTo ! TestConsumer.Job(msg)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
import scala.concurrent.duration._
|
||||
import scala.util.Failure
|
||||
import scala.util.Success
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.util.Timeout
|
||||
|
||||
object TestProducerWithAsk {
|
||||
|
||||
trait Command
|
||||
final case class RequestNext(askTo: ActorRef[ProducerController.MessageWithConfirmation[TestConsumer.Job]])
|
||||
extends Command
|
||||
private case object Tick extends Command
|
||||
private final case class Confirmed(seqNr: Long) extends Command
|
||||
private case object AskTimeout extends Command
|
||||
|
||||
private implicit val askTimeout: Timeout = 10.seconds
|
||||
|
||||
def apply(
|
||||
delay: FiniteDuration,
|
||||
replyProbe: ActorRef[Long],
|
||||
producerController: ActorRef[ProducerController.Start[TestConsumer.Job]]): Behavior[Command] = {
|
||||
Behaviors.setup { context =>
|
||||
context.setLoggerName("TestProducerWithConfirmation")
|
||||
val requestNextAdapter: ActorRef[ProducerController.RequestNext[TestConsumer.Job]] =
|
||||
context.messageAdapter(req => RequestNext(req.askNextTo))
|
||||
producerController ! ProducerController.Start(requestNextAdapter)
|
||||
|
||||
Behaviors.withTimers { timers =>
|
||||
timers.startTimerWithFixedDelay(Tick, Tick, delay)
|
||||
idle(0, replyProbe)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def idle(n: Int, replyProbe: ActorRef[Long]): Behavior[Command] = {
|
||||
Behaviors.receiveMessage {
|
||||
case Tick => Behaviors.same
|
||||
case RequestNext(sendTo) => active(n + 1, replyProbe, sendTo)
|
||||
case Confirmed(seqNr) =>
|
||||
replyProbe ! seqNr
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def active(
|
||||
n: Int,
|
||||
replyProbe: ActorRef[Long],
|
||||
sendTo: ActorRef[ProducerController.MessageWithConfirmation[TestConsumer.Job]]): Behavior[Command] = {
|
||||
Behaviors.receive { (ctx, msg) =>
|
||||
msg match {
|
||||
case Tick =>
|
||||
val msg = s"msg-$n"
|
||||
ctx.log.info("sent {}", msg)
|
||||
ctx.ask(
|
||||
sendTo,
|
||||
(askReplyTo: ActorRef[Long]) =>
|
||||
ProducerController.MessageWithConfirmation(TestConsumer.Job(msg), askReplyTo)) {
|
||||
case Success(seqNr) => Confirmed(seqNr)
|
||||
case Failure(_) => AskTimeout
|
||||
}
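// the ask reply (the confirmed seqNr) is adapted to Confirmed; a failed ask becomes AskTimeout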
|
||||
idle(n, replyProbe)
|
||||
|
||||
case RequestNext(_) =>
|
||||
throw new IllegalStateException("Unexpected RequestNext, already got one.")
|
||||
|
||||
case Confirmed(seqNr) =>
|
||||
ctx.log.info("Reply Confirmed [{}]", seqNr)
|
||||
replyProbe ! seqNr
|
||||
Behaviors.same
|
||||
|
||||
case AskTimeout =>
|
||||
ctx.log.warn("Timeout")
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
|
||||
object TestProducerWorkPulling {
|
||||
|
||||
trait Command
|
||||
final case class RequestNext(sendTo: ActorRef[TestConsumer.Job]) extends Command
|
||||
private final case object Tick extends Command
|
||||
|
||||
def apply(
|
||||
delay: FiniteDuration,
|
||||
producerController: ActorRef[WorkPullingProducerController.Start[TestConsumer.Job]]): Behavior[Command] = {
|
||||
Behaviors.setup { context =>
|
||||
context.setLoggerName("TestProducerWorkPulling")
|
||||
val requestNextAdapter: ActorRef[WorkPullingProducerController.RequestNext[TestConsumer.Job]] =
|
||||
context.messageAdapter(req => RequestNext(req.sendNextTo))
|
||||
producerController ! WorkPullingProducerController.Start(requestNextAdapter)
|
||||
|
||||
Behaviors.withTimers { timers =>
|
||||
timers.startTimerWithFixedDelay(Tick, Tick, delay)
|
||||
idle(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def idle(n: Int): Behavior[Command] = {
|
||||
Behaviors.receiveMessage {
|
||||
case Tick => Behaviors.same
|
||||
case RequestNext(sendTo) => active(n + 1, sendTo)
|
||||
}
|
||||
}
|
||||
|
||||
private def active(n: Int, sendTo: ActorRef[TestConsumer.Job]): Behavior[Command] = {
|
||||
Behaviors.receive { (ctx, msg) =>
|
||||
msg match {
|
||||
case Tick =>
|
||||
val msg = s"msg-$n"
|
||||
ctx.log.info("sent {}", msg)
|
||||
sendTo ! TestConsumer.Job(msg)
|
||||
idle(n)
|
||||
|
||||
case RequestNext(_) =>
|
||||
throw new IllegalStateException("Unexpected RequestNext, already got one.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,278 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.receptionist.Receptionist
|
||||
import akka.actor.typed.receptionist.ServiceKey
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
class WorkPullingSpec
|
||||
extends ScalaTestWithActorTestKit("""
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
import TestConsumer.defaultConsumerDelay
|
||||
import TestProducer.defaultProducerDelay
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
private def producerId: String = s"p-$idCount"
|
||||
|
||||
private def awaitWorkersRegistered(
|
||||
controller: ActorRef[WorkPullingProducerController.Command[TestConsumer.Job]],
|
||||
count: Int): Unit = {
|
||||
val probe = createTestProbe[WorkPullingProducerController.WorkerStats]()
|
||||
probe.awaitAssert {
|
||||
controller ! WorkPullingProducerController.GetWorkerStats(probe.ref)
|
||||
probe.receiveMessage().numberOfWorkers should ===(count)
|
||||
}
|
||||
}
|
||||
|
||||
val workerServiceKey: ServiceKey[ConsumerController.Command[TestConsumer.Job]] = ServiceKey("worker")
|
||||
|
||||
"ReliableDelivery with work-pulling" must {
|
||||
|
||||
"illustrate work-pulling usage" in {
|
||||
nextId()
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, None),
|
||||
s"workPullingController-${idCount}")
|
||||
val jobProducer =
|
||||
spawn(TestProducerWorkPulling(defaultProducerDelay, workPullingController), name = s"jobProducer-${idCount}")
|
||||
|
||||
val consumerEndProbe1 = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val workerController1 =
|
||||
spawn(ConsumerController[TestConsumer.Job](workerServiceKey), s"workerController1-${idCount}")
|
||||
spawn(
|
||||
TestConsumer(defaultConsumerDelay, 42, consumerEndProbe1.ref, workerController1),
|
||||
name = s"worker1-${idCount}")
|
||||
|
||||
val consumerEndProbe2 = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val workerController2 =
|
||||
spawn(ConsumerController[TestConsumer.Job](workerServiceKey), s"workerController2-${idCount}")
|
||||
spawn(
|
||||
TestConsumer(defaultConsumerDelay, 42, consumerEndProbe2.ref, workerController2),
|
||||
name = s"worker2-${idCount}")
|
||||
|
||||
consumerEndProbe1.receiveMessage(10.seconds)
|
||||
consumerEndProbe2.receiveMessage()
|
||||
|
||||
testKit.stop(workerController1)
|
||||
testKit.stop(workerController2)
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(jobProducer)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"resend unconfirmed to other if worker dies" in {
|
||||
nextId()
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, None),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.message should ===(TestConsumer.Job("msg-1"))
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
workerController1Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-2"))
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
workerController1Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-3"))
|
||||
|
||||
val workerController2Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController2Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 2)
|
||||
|
||||
workerController1Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
// msg-2 and msg-3 were not confirmed and should be resent to another worker
|
||||
val seqMsg2 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg2.message should ===(TestConsumer.Job("msg-2"))
|
||||
seqMsg2.seqNr should ===(1)
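// the seqNr starts from 1 again because delivery to the new worker goes through its own ProducerController session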
|
||||
seqMsg2.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
workerController2Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-3"))
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
workerController2Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-4"))
|
||||
|
||||
workerController2Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation" in {
|
||||
import WorkPullingProducerController.MessageWithConfirmation
|
||||
nextId()
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, None),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
val replyProbe = createTestProbe[Done]()
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-1"), replyProbe.ref)
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.message should ===(TestConsumer.Job("msg-1"))
|
||||
seqMsg1.ack should ===(true)
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyProbe.ref)
|
||||
val seqMsg2 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg2.message should ===(TestConsumer.Job("msg-2"))
|
||||
seqMsg2.ack should ===(true)
|
||||
// no reply until ack
|
||||
replyProbe.expectNoMessage()
|
||||
seqMsg2.producerController ! ProducerControllerImpl.Ack(2L)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-3"), replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyProbe.ref)
|
||||
workerController1Probe.receiveMessages(2)
|
||||
seqMsg2.producerController ! ProducerControllerImpl.Ack(4L)
|
||||
replyProbe.receiveMessages(2)
|
||||
|
||||
workerController1Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation also when worker dies" in {
|
||||
import WorkPullingProducerController.MessageWithConfirmation
|
||||
nextId()
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, None),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
val replyProbe = createTestProbe[Done]()
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-1"), replyProbe.ref)
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-2"), replyProbe.ref)
|
||||
workerController1Probe.receiveMessage()
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Ack(2L)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-3"), replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(TestConsumer.Job("msg-4"), replyProbe.ref)
|
||||
workerController1Probe.receiveMessages(2)
|
||||
|
||||
val workerController2Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController2Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 2)
|
||||
|
||||
workerController1Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
replyProbe.expectNoMessage()
|
||||
|
||||
// msg-3 and msg-4 were not confirmed and should be resent to another worker
|
||||
val seqMsg3 = workerController2Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg3.message should ===(TestConsumer.Job("msg-3"))
|
||||
seqMsg3.seqNr should ===(1)
|
||||
seqMsg3.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
workerController2Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-4"))
|
||||
seqMsg3.producerController ! ProducerControllerImpl.Ack(2L)
|
||||
replyProbe.receiveMessage()
|
||||
|
||||
workerController2Probe.stop()
|
||||
awaitWorkersRegistered(workPullingController, 0)
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
"allow restart of producer" in {
|
||||
nextId()
|
||||
|
||||
val workPullingController =
|
||||
spawn(
|
||||
WorkPullingProducerController[TestConsumer.Job](producerId, workerServiceKey, None),
|
||||
s"workPullingController-${idCount}")
|
||||
val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val workerController1Probe = createTestProbe[ConsumerController.Command[TestConsumer.Job]]()
|
||||
system.receptionist ! Receptionist.Register(workerServiceKey, workerController1Probe.ref)
|
||||
awaitWorkersRegistered(workPullingController, 1)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-1")
|
||||
val seqMsg1 = workerController1Probe.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
seqMsg1.message should ===(TestConsumer.Job("msg-1"))
|
||||
seqMsg1.producerController ! ProducerControllerImpl.Request(1L, 10L, true, false)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! TestConsumer.Job("msg-2")
|
||||
workerController1Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-2"))
|
||||
producerProbe.receiveMessage()
|
||||
|
||||
// restart producer, new Start
|
||||
val producerProbe2 = createTestProbe[WorkPullingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
workPullingController ! WorkPullingProducerController.Start(producerProbe2.ref)
|
||||
|
||||
producerProbe2.receiveMessage().sendNextTo ! TestConsumer.Job("msg-3")
|
||||
workerController1Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-3"))
|
||||
producerProbe2.receiveMessage().sendNextTo ! TestConsumer.Job("msg-4")
|
||||
workerController1Probe
|
||||
.expectMessageType[ConsumerController.SequencedMessage[TestConsumer.Job]]
|
||||
.message should ===(TestConsumer.Job("msg-4"))
|
||||
|
||||
testKit.stop(workPullingController)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TODO #28723 add a random test for work pulling
@ -64,3 +64,47 @@ akka.actor {
#
# This behavior can be disabled by setting this property to `off`.
akka.use-slf4j = on

akka.reliable-delivery {
  producer-controller {
    durable-queue {
      # The ProducerController uses this timeout for the requests to
      # the durable queue. If there is no reply within the timeout it
      # will be retried.
      request-timeout = 3s

      # The ProducerController retries requests to the durable queue this
      # number of times before failing.
      retry-attempts = 10
    }
  }

  consumer-controller {
    # Number of messages in flight between ProducerController and
    # ConsumerController. The ConsumerController requests more messages
    # when half of the window has been used.
    flow-control-window = 50

    # The ConsumerController resends flow control messages to the
    # ProducerController with this interval.
    resend-interval = 1s

    # If this is enabled lost messages will not be resent, but flow control is used.
    # This can be more efficient since messages don't have to be
    # kept in memory in the `ProducerController` until they have been
    # confirmed, but the drawback is that lost messages will not be delivered.
    only-flow-control = false
  }

  work-pulling {
    producer-controller = ${akka.reliable-delivery.producer-controller}
    producer-controller {
      # Limit of how many messages can be buffered when there
      # is no demand from the consumer side.
      buffer-size = 1000

      # Ask timeout for sending a message to a worker until receiving an Ack from the worker.
      internal-ask-timeout = 60s
    }
  }
}
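For per-instance tuning these defaults also surface through the `Settings` factories introduced in this PR; a minimal sketch, assuming a `system: ActorSystem[_]` is in scope:

import akka.actor.typed.delivery.{ ConsumerController, ProducerController }

// override selected defaults programmatically instead of (or in addition to) configuration
val consumerSettings =
  ConsumerController.Settings(system).withFlowControlWindow(20).withOnlyFlowControl(false)
val producerSettings =
  ProducerController.Settings(system).withDurableQueueRetryAttempts(5)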
@ -0,0 +1,280 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import java.time.{ Duration => JavaDuration }
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.DeadLetterSuppression
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.ActorSystem
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.internal.ConsumerControllerImpl
|
||||
import akka.actor.typed.delivery.internal.DeliverySerializable
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.receptionist.ServiceKey
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.annotation.DoNotInherit
|
||||
import akka.annotation.InternalApi
|
||||
import akka.util.JavaDurationConverters._
|
||||
import com.typesafe.config.Config
|
||||
|
||||
/**
|
||||
* `ConsumerController` and [[ProducerController]] or [[WorkPullingProducerController]] are used
|
||||
* together. See the descriptions in those classes or the Akka reference documentation for
|
||||
* how they are intended to be used.
|
||||
*
|
||||
* The destination consumer actor will start the flow by sending an initial [[ConsumerController.Start]]
|
||||
* message to the `ConsumerController`. The `ActorRef` in the `Start` message is typically constructed
|
||||
* as a message adapter to map the [[ConsumerController.Delivery]] to the protocol of the consumer actor.
|
||||
*
|
||||
* Received messages from the producer are wrapped in [[ConsumerController.Delivery]] when sent to the consumer,
|
||||
* which is supposed to reply with [[ConsumerController.Confirmed]] when it has processed the message.
|
||||
* The next message is not delivered until the previous one is confirmed.
* More messages from the producer that arrive while waiting for the confirmation are stashed by
* the `ConsumerController` and delivered when the previous message has been confirmed.
|
||||
*
|
||||
* The consumer and the `ConsumerController` actors are supposed to be local so that these messages are fast
|
||||
* and not lost. This is enforced by a runtime check.
|
||||
*
|
||||
* The `ConsumerController` is automatically stopped when the consumer that registered with the `Start`
|
||||
* message is terminated.
|
||||
*/
|
||||
@ApiMayChange // TODO #28719 when removing ApiMayChange consider removing `case class` for some of the messages
|
||||
object ConsumerController {
|
||||
import ConsumerControllerImpl.UnsealedInternalCommand
|
||||
|
||||
type SeqNr = Long
|
||||
|
||||
sealed trait Command[+A] extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* Initial message from the consumer actor. The `deliverTo` is typically constructed
|
||||
* as a message adapter to map the [[Delivery]] to the protocol of the consumer actor.
|
||||
*
|
||||
* If the consumer is restarted it should send a new `Start` message to the
|
||||
* `ConsumerController`.
|
||||
*/
|
||||
final case class Start[A](deliverTo: ActorRef[Delivery[A]]) extends Command[A]
|
||||
|
||||
object Delivery {
|
||||
def apply[A](message: A, confirmTo: ActorRef[Confirmed], producerId: String, seqNr: SeqNr): Delivery[A] =
|
||||
new Delivery(message, confirmTo, producerId, seqNr)
|
||||
|
||||
def unapply[A](delivery: Delivery[A]): Option[(A, ActorRef[Confirmed])] =
|
||||
Option((delivery.message, delivery.confirmTo))
|
||||
}
|
||||
|
||||
/**
|
||||
* Received messages from the producer are wrapped in `Delivery` when sent to the consumer.
|
||||
* When the message has been processed the consumer is supposed to send [[Confirmed]] back
|
||||
* to the `ConsumerController` via the `confirmTo`.
|
||||
*/
|
||||
final class Delivery[A](
|
||||
val message: A,
|
||||
val confirmTo: ActorRef[Confirmed],
|
||||
val producerId: String,
|
||||
val seqNr: SeqNr) {
|
||||
override def toString: String = s"Delivery($message,$confirmTo,$producerId,$seqNr)"
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: The generic `Class` type for `ConsumerController.Delivery` that can be used when creating a
|
||||
* `messageAdapter` for `Class<Delivery<MessageType>>`.
|
||||
*/
|
||||
def deliveryClass[A](): Class[Delivery[A]] = classOf[Delivery[A]]
|
||||
|
||||
/**
|
||||
* Java API: The generic `Class` type for `ConsumerController.Command` that can be used when creating a `ServiceKey`
|
||||
* for `Class<Command<MessageType>>`.
|
||||
*/
|
||||
def serviceKeyClass[A]: Class[Command[A]] = classOf[Command[A]]
|
||||
|
||||
@DoNotInherit
|
||||
trait Confirmed extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* When the message has been processed the consumer is supposed to send `Confirmed` back
|
||||
* to the `ConsumerController` via the `confirmTo` in the [[Delivery]] message.
|
||||
*/
|
||||
case object Confirmed extends Confirmed
|
||||
|
||||
/**
|
||||
* Java API: the singleton instance of the Confirmed message.
|
||||
* When the message has been processed the consumer is supposed to send `Confirmed` back
|
||||
* to the `ConsumerController` via the `confirmTo` in the [[Delivery]] message.
|
||||
*/
|
||||
def confirmed(): Confirmed = Confirmed
|
||||
|
||||
/**
|
||||
* Register the `ConsumerController` to the given `producerController`. It will
|
||||
* retry the registration until the `ProducerController` has acknowledged by sending its
|
||||
* first message.
|
||||
*
|
||||
* Alternatively, this registration can be done on the producer side with the
|
||||
* [[ProducerController.RegisterConsumer]] message.
|
||||
*/
|
||||
final case class RegisterToProducerController[A](producerController: ActorRef[ProducerController.Command[A]])
|
||||
extends Command[A]
|
||||
|
||||
final case class DeliverThenStop[A]() extends Command[A]
|
||||
|
||||
/**
|
||||
* This is used between the `ProducerController` and `ConsumerController`. Should rarely be used in
|
||||
* application code but is public because it's in the signature for the `EntityTypeKey` when using
|
||||
* `ShardingConsumerController`.
|
||||
*
|
||||
* In the future we may also make the custom `send` in `ProducerController` public to make it possible to
|
||||
* wrap it or send it in other ways when building higher level abstractions that are using the `ProducerController`.
|
||||
* That is used by `ShardingProducerController`.
|
||||
*/
|
||||
final case class SequencedMessage[A](producerId: String, seqNr: SeqNr, message: A, first: Boolean, ack: Boolean)(
|
||||
/** INTERNAL API */
|
||||
@InternalApi private[akka] val producerController: ActorRef[ProducerControllerImpl.InternalCommand])
|
||||
extends Command[A]
|
||||
with DeliverySerializable
|
||||
with DeadLetterSuppression {
|
||||
|
||||
/** INTERNAL API */
|
||||
@InternalApi private[akka] def asFirst: SequencedMessage[A] =
|
||||
copy(first = true)(producerController)
|
||||
}
|
||||
|
||||
object Settings {
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from config `akka.reliable-delivery.consumer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def apply(system: ActorSystem[_]): Settings =
|
||||
apply(system.settings.config.getConfig("akka.reliable-delivery.consumer-controller"))
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.consumer-controller`.
|
||||
*/
|
||||
def apply(config: Config): Settings = {
|
||||
new Settings(
|
||||
flowControlWindow = config.getInt("flow-control-window"),
|
||||
resendInterval = config.getDuration("resend-interval").asScala,
|
||||
onlyFlowControl = config.getBoolean("only-flow-control"))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: Factory method from config `akka.reliable-delivery.consumer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def create(system: ActorSystem[_]): Settings =
|
||||
apply(system)
|
||||
|
||||
/**
|
||||
* Java API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.consumer-controller`.
|
||||
*/
|
||||
def create(config: Config): Settings =
|
||||
apply(config)
|
||||
}
|
||||
|
||||
final class Settings private (
|
||||
val flowControlWindow: Int,
|
||||
val resendInterval: FiniteDuration,
|
||||
val onlyFlowControl: Boolean) {
|
||||
|
||||
def withFlowControlWindow(newFlowControlWindow: Int): Settings =
|
||||
copy(flowControlWindow = newFlowControlWindow)
|
||||
|
||||
/**
|
||||
* Scala API
|
||||
*/
|
||||
def withResendInterval(newResendInterval: FiniteDuration): Settings =
|
||||
copy(resendInterval = newResendInterval)
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def withResendInterval(newResendInterval: JavaDuration): Settings =
|
||||
copy(resendInterval = newResendInterval.asScala)
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def getResendInterval(): JavaDuration =
|
||||
resendInterval.asJava
|
||||
|
||||
def withOnlyFlowControl(newOnlyFlowControl: Boolean): Settings =
|
||||
copy(onlyFlowControl = newOnlyFlowControl)
|
||||
|
||||
/**
|
||||
* Private copy method for internal use only.
|
||||
*/
|
||||
private def copy(
|
||||
flowControlWindow: Int = flowControlWindow,
|
||||
resendInterval: FiniteDuration = resendInterval,
|
||||
onlyFlowControl: Boolean = onlyFlowControl) =
|
||||
new Settings(flowControlWindow, resendInterval, onlyFlowControl)
|
||||
|
||||
override def toString: String =
|
||||
s"Settings($flowControlWindow, $resendInterval, $onlyFlowControl)"
|
||||
}
|
||||
|
||||
def apply[A](): Behavior[Command[A]] =
|
||||
Behaviors.setup { context =>
|
||||
apply(serviceKey = None, Settings(context.system))
|
||||
}
|
||||
|
||||
def apply[A](settings: Settings): Behavior[Command[A]] =
|
||||
apply(serviceKey = None, settings)
|
||||
|
||||
/**
|
||||
* To be used with [[WorkPullingProducerController]]. It will register itself to the
|
||||
* [[akka.actor.typed.receptionist.Receptionist]] with the given `serviceKey`, and the
|
||||
* `WorkPullingProducerController` subscribes to the same key to find active workers.
|
||||
*/
|
||||
def apply[A](serviceKey: ServiceKey[Command[A]]): Behavior[Command[A]] =
|
||||
Behaviors.setup { context =>
|
||||
apply(Some(serviceKey), Settings(context.system))
|
||||
}
|
||||
|
||||
def apply[A](serviceKey: ServiceKey[Command[A]], settings: Settings): Behavior[Command[A]] =
|
||||
apply(Some(serviceKey), settings)
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def apply[A](
|
||||
serviceKey: Option[ServiceKey[Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
ConsumerControllerImpl(serviceKey, settings)
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](): Behavior[Command[A]] =
|
||||
apply()
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](settings: Settings): Behavior[Command[A]] =
|
||||
apply(settings)
|
||||
|
||||
/**
|
||||
* Java API: To be used with [[WorkPullingProducerController]]. It will register itself to the
|
||||
* [[akka.actor.typed.receptionist.Receptionist]] with the given `serviceKey`, and the
|
||||
* `WorkPullingProducerController` subscribes to the same key to find active workers.
|
||||
*/
|
||||
def create[A](serviceKey: ServiceKey[Command[A]]): Behavior[Command[A]] =
|
||||
apply(serviceKey)
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](serviceKey: ServiceKey[Command[A]], settings: Settings): Behavior[Command[A]] =
|
||||
apply(Some(serviceKey), settings)
|
||||
|
||||
}
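To make the consumer side of the message flow described above concrete, here is a minimal sketch (the `JobConsumer` name, its `WrappedDelivery` wrapper and the `String` payload are illustrative assumptions, not part of this PR):

import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.scaladsl.Behaviors

object JobConsumer {
  sealed trait Command
  private final case class WrappedDelivery(delivery: ConsumerController.Delivery[String]) extends Command

  def apply(consumerController: ActorRef[ConsumerController.Command[String]]): Behavior[Command] =
    Behaviors.setup { context =>
      // adapter mapping Delivery[String] into this actor's own protocol
      val deliveryAdapter =
        context.messageAdapter[ConsumerController.Delivery[String]](WrappedDelivery)
      // start the flow; messages are now delivered one at a time
      consumerController ! ConsumerController.Start(deliveryAdapter)

      Behaviors.receiveMessage { case WrappedDelivery(delivery) =>
        context.log.info("Processing {}", delivery.message)
        // confirm so that the ConsumerController may deliver the next message
        delivery.confirmTo ! ConsumerController.Confirmed
        Behaviors.same
      }
    }
}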
@ -0,0 +1,103 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import scala.collection.immutable
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
import akka.actor.typed.delivery.internal.DeliverySerializable
|
||||
|
||||
/**
|
||||
* Actor message protocol for storing and confirming reliable delivery of messages. A [[akka.actor.typed.Behavior]]
|
||||
* implementation of this protocol can optionally be used with [[ProducerController]] when messages shall survive
|
||||
* a crash of the producer side.
|
||||
*
|
||||
* An implementation of this exists in `akka.persistence.typed.delivery.EventSourcedProducerQueue`.
|
||||
*/
|
||||
@ApiMayChange // TODO #28719 when removing ApiMayChange consider removing `case class` for some of the messages
|
||||
object DurableProducerQueue {
|
||||
|
||||
type SeqNr = Long
|
||||
// Timestamp in millis since epoch, System.currentTimeMillis
|
||||
type TimestampMillis = Long
|
||||
|
||||
type ConfirmationQualifier = String
|
||||
|
||||
val NoQualifier: ConfirmationQualifier = ""
|
||||
|
||||
trait Command[A]
|
||||
|
||||
/**
|
||||
* Request that is used at startup to retrieve the unconfirmed messages and current sequence number.
|
||||
*/
|
||||
final case class LoadState[A](replyTo: ActorRef[State[A]]) extends Command[A]
|
||||
|
||||
/**
|
||||
* Store the fact that a message is to be sent. Replies with [[StoreMessageSentAck]] when
|
||||
* the message has been successfully stored.
|
||||
*
|
||||
* This command may be retried and the implementation should be idempotent, i.e. deduplicate
|
||||
* already processed sequence numbers.
|
||||
*/
|
||||
final case class StoreMessageSent[A](sent: MessageSent[A], replyTo: ActorRef[StoreMessageSentAck]) extends Command[A]
|
||||
|
||||
final case class StoreMessageSentAck(storedSeqNr: SeqNr)
|
||||
|
||||
/**
|
||||
* Store the fact that a message has been confirmed to be delivered and processed.
|
||||
*
|
||||
* This command may be retried and the implementation should be idempotent, i.e. deduplicate
|
||||
* already processed sequence numbers.
|
||||
*/
|
||||
final case class StoreMessageConfirmed[A](
|
||||
seqNr: SeqNr,
|
||||
confirmationQualifier: ConfirmationQualifier,
|
||||
timestampMillis: TimestampMillis)
|
||||
extends Command[A]
|
||||
|
||||
object State {
|
||||
def empty[A]: State[A] = State(1L, 0L, Map.empty, Vector.empty)
|
||||
}
|
||||
final case class State[A](
|
||||
currentSeqNr: SeqNr,
|
||||
highestConfirmedSeqNr: SeqNr,
|
||||
confirmedSeqNr: Map[ConfirmationQualifier, (SeqNr, TimestampMillis)],
|
||||
unconfirmed: immutable.IndexedSeq[MessageSent[A]])
|
||||
extends DeliverySerializable
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] sealed trait Event extends DeliverySerializable
|
||||
|
||||
/**
|
||||
* The fact (event) that a message has been sent.
|
||||
*/
|
||||
final case class MessageSent[A](
|
||||
seqNr: SeqNr,
|
||||
message: A,
|
||||
ack: Boolean,
|
||||
confirmationQualifier: ConfirmationQualifier,
|
||||
timestampMillis: TimestampMillis)
|
||||
extends Event
|
||||
|
||||
/**
|
||||
* INTERNAL API: The fact (event) that a message has been confirmed to be delivered and processed.
|
||||
*/
|
||||
@InternalApi private[akka] final case class Confirmed(
|
||||
seqNr: SeqNr,
|
||||
confirmationQualifier: ConfirmationQualifier,
|
||||
timestampMillis: TimestampMillis)
|
||||
extends Event
|
||||
|
||||
/**
|
||||
* INTERNAL API: Remove entries related to the confirmationQualifiers that haven't been used for a while.
|
||||
*/
|
||||
@InternalApi private[akka] final case class Cleanup(confirmationQualifiers: Set[String]) extends Event
|
||||
|
||||
}
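As an illustration of this protocol, here is a minimal, non-durable sketch of a queue behavior (the `InMemoryProducerQueue` name is made up here; it is only an in-memory stand-in for illustration, the real implementation being `EventSourcedProducerQueue` in `akka-persistence-typed`):

import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors

object InMemoryProducerQueue {
  import DurableProducerQueue._

  def apply[A](state: State[A] = State.empty[A]): Behavior[Command[A]] =
    Behaviors.receiveMessage[Command[A]] {
      case LoadState(replyTo) =>
        // at startup the ProducerController asks for unconfirmed messages and the current seqNr
        replyTo ! state
        Behaviors.same
      case StoreMessageSent(sent, replyTo) =>
        // ack with the stored seqNr; deduplication of already stored seqNrs is skipped in this sketch
        replyTo ! StoreMessageSentAck(sent.seqNr)
        apply(state.copy(currentSeqNr = sent.seqNr + 1, unconfirmed = state.unconfirmed :+ sent))
      case StoreMessageConfirmed(seqNr, qualifier, timestampMillis) =>
        apply(
          state.copy(
            highestConfirmedSeqNr = math.max(state.highestConfirmedSeqNr, seqNr),
            confirmedSeqNr = state.confirmedSeqNr.updated(qualifier, (seqNr, timestampMillis)),
            unconfirmed = state.unconfirmed.filterNot(_.seqNr <= seqNr)))
    }
}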
@ -0,0 +1,263 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import java.time.{ Duration => JavaDuration }
|
||||
import java.util.Optional
|
||||
|
||||
import scala.compat.java8.OptionConverters._
|
||||
import scala.concurrent.duration._
|
||||
import scala.reflect.ClassTag
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.ActorSystem
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.internal.DeliverySerializable
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.annotation.InternalApi
|
||||
import akka.util.JavaDurationConverters._
|
||||
import com.typesafe.config.Config
|
||||
|
||||
/**
|
||||
* Point-to-point reliable delivery between a single producer actor sending messages and a single consumer
|
||||
* actor receiving the messages. Used together with [[ConsumerController]].
|
||||
*
|
||||
* The producer actor will start the flow by sending a [[ProducerController.Start]] message to
|
||||
* the `ProducerController`. The `ActorRef` in the `Start` message is typically constructed
|
||||
* as a message adapter to map the [[ProducerController.RequestNext]] to the protocol of the
|
||||
* producer actor.
|
||||
*
|
||||
* For the `ProducerController` to know where to send the messages it must be connected with the
|
||||
* `ConsumerController`. You do this with [[ProducerController.RegisterConsumer]] or
|
||||
* [[ConsumerController.RegisterToProducerController]] messages.
|
||||
*
|
||||
* The `ProducerController` sends `RequestNext` to the producer, which is then allowed to send one
|
||||
* message to the `ProducerController` via the `sendNextTo` in the `RequestNext`. Thereafter the
|
||||
* producer will receive a new `RequestNext` when it's allowed to send one more message.
|
||||
*
|
||||
* The producer and `ProducerController` actors are supposed to be local so that these messages are
|
||||
* fast and not lost. This is enforced by a runtime check.
|
||||
*
|
||||
* Many unconfirmed messages can be in flight between the `ProducerController` and `ConsumerController`.
|
||||
* The flow control is driven by the consumer side, which means that the `ProducerController` will
|
||||
* not send faster than the demand requested by the `ConsumerController`.
|
||||
*
|
||||
* Lost messages are detected, resent and deduplicated if needed. This is also driven by the consumer side,
|
||||
* which means that the `ProducerController` will not push resends unless requested by the
|
||||
* `ConsumerController`.
|
||||
*
|
||||
* Until sent messages have been confirmed the `ProducerController` keeps them in memory to be able to
|
||||
* resend them. If the JVM of the `ProducerController` crashes those unconfirmed messages are lost.
|
||||
* To make sure the messages can be delivered also in that scenario the `ProducerController` can be
|
||||
* used with a [[DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so
|
||||
* that they can be redelivered when the producer is started again. An implementation of the
|
||||
* `DurableProducerQueue` is provided by `EventSourcedProducerQueue` in `akka-persistence-typed`.
|
||||
*
|
||||
* Instead of using `tell` with the `sendNextTo` in the `RequestNext` the producer can use `context.ask`
|
||||
* with the `askNextTo` in the `RequestNext`. The difference is that a reply is sent back when the
|
||||
* message has been handled. If a `DurableProducerQueue` is used then the reply is sent when the message
|
||||
* has been stored successfully, but it might not have been processed by the consumer yet. Otherwise the
|
||||
* reply is sent after the consumer has processed and confirmed the message.
|
||||
*
|
||||
* If the consumer crashes, a new `ConsumerController` can be connected to the original `ProducerController`
* without restarting it. The `ProducerController` will then redeliver all unconfirmed messages.
|
||||
*
|
||||
* It's also possible to use the `ProducerController` and `ConsumerController` without resending
|
||||
* lost messages, but the flow control is still used. This can for example be useful when both consumer and
|
||||
* producer are known to be located in the same local `ActorSystem`. This can be more efficient since messages
|
||||
* don't have to be kept in memory in the `ProducerController` until they have been
|
||||
* confirmed, but the drawback is that lost messages will not be delivered. See configuration
|
||||
* `only-flow-control` of the `ConsumerController`.
|
||||
*
|
||||
* The `producerId` is used in logging and included as an MDC entry with key `"producerId"`. It's propagated
* to the `ConsumerController` and is useful for correlating log messages. It can be any `String` but it's
* recommended to use a unique identifier representing the producer.
|
||||
*/
|
||||
@ApiMayChange // TODO #28719 when removing ApiMayChange consider removing `case class` for some of the messages
|
||||
object ProducerController {
|
||||
import ProducerControllerImpl.UnsealedInternalCommand
|
||||
|
||||
type SeqNr = Long
|
||||
|
||||
sealed trait Command[A] extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* Initial message from the producer actor. The `producer` is typically constructed
|
||||
* as a message adapter to map the [[RequestNext]] to the protocol of the producer actor.
|
||||
*
|
||||
* If the producer is restarted it should send a new `Start` message to the
|
||||
* `ProducerController`.
|
||||
*/
|
||||
final case class Start[A](producer: ActorRef[RequestNext[A]]) extends Command[A]
|
||||
|
||||
/**
|
||||
* The `ProducerController` sends `RequestNext` to the producer when it is allowed to send one
|
||||
* message via the `sendNextTo` or `askNextTo`. Note that only one message is allowed, and then
|
||||
* it must wait for the next `RequestNext` before sending one more message.
|
||||
*/
|
||||
final case class RequestNext[A](
|
||||
producerId: String,
|
||||
currentSeqNr: SeqNr,
|
||||
confirmedSeqNr: SeqNr,
|
||||
sendNextTo: ActorRef[A],
|
||||
askNextTo: ActorRef[MessageWithConfirmation[A]])
|
||||
|
||||
/**
|
||||
* Java API: The generic `Class` type for `ProducerController.RequestNext` that can be used when creating a
|
||||
* `messageAdapter` for `Class<RequestNext<MessageType>>`.
|
||||
*/
|
||||
def requestNextClass[A](): Class[RequestNext[A]] = classOf[RequestNext[A]]
|
||||
|
||||
/**
|
||||
* For sending a confirmation message back to the producer when the message has been confirmed.
|
||||
* Typically used with `context.ask` from the producer.
|
||||
*
|
||||
* If `DurableProducerQueue` is used the confirmation reply is sent when the message has been
|
||||
* successfully stored, meaning that the actual delivery to the consumer may happen later.
|
||||
* If `DurableProducerQueue` is not used the confirmation reply is sent when the message has been
|
||||
* fully delivered, processed, and confirmed by the consumer.
|
||||
*/
|
||||
final case class MessageWithConfirmation[A](message: A, replyTo: ActorRef[SeqNr]) extends UnsealedInternalCommand
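  // Illustrative sketch (not part of this PR): using `askNextTo` with `context.ask` from the
  // producer actor. The `JobDone`/`JobFailed` messages and the 10 second timeout are assumptions.
  private object AskNextToExample {
    import scala.concurrent.duration._
    import scala.util.{ Failure, Success }
    import akka.actor.typed.scaladsl.ActorContext
    import akka.util.Timeout

    sealed trait ProducerCommand
    final case class JobDone(seqNr: SeqNr) extends ProducerCommand
    final case class JobFailed(cause: Throwable) extends ProducerCommand

    def sendWithConfirmation(
        context: ActorContext[ProducerCommand],
        next: RequestNext[String],
        job: String): Unit = {
      implicit val timeout: Timeout = 10.seconds
      context.ask[MessageWithConfirmation[String], SeqNr](
        next.askNextTo,
        askReplyTo => MessageWithConfirmation(job, askReplyTo)) {
        case Success(seqNr) => JobDone(seqNr)
        case Failure(ex)    => JobFailed(ex)
      }
    }
  }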
|
||||
|
||||
/**
|
||||
* Register the given `consumerController` to the `ProducerController`.
|
||||
*
|
||||
* Alternatively, this registration can be done on the consumer side with the
|
||||
* [[ConsumerController.RegisterToProducerController]] message.
|
||||
*
|
||||
* When using a custom `send` function for the `ProducerController` this should not be used.
|
||||
*/
|
||||
final case class RegisterConsumer[A](consumerController: ActorRef[ConsumerController.Command[A]])
|
||||
extends Command[A]
|
||||
with DeliverySerializable
|
||||
|
||||
object Settings {
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from config `akka.reliable-delivery.producer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def apply(system: ActorSystem[_]): Settings =
|
||||
apply(system.settings.config.getConfig("akka.reliable-delivery.producer-controller"))
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.producer-controller`.
|
||||
*/
|
||||
def apply(config: Config): Settings = {
|
||||
new Settings(
|
||||
durableQueueRequestTimeout = config.getDuration("durable-queue.request-timeout").asScala,
|
||||
durableQueueRetryAttempts = config.getInt("durable-queue.retry-attempts"))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: Factory method from config `akka.reliable-delivery.producer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def create(system: ActorSystem[_]): Settings =
|
||||
apply(system)
|
||||
|
||||
/**
|
||||
* Java API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.producer-controller`.
|
||||
*/
|
||||
def create(config: Config): Settings =
|
||||
apply(config)
|
||||
}
|
||||
|
||||
final class Settings private (val durableQueueRequestTimeout: FiniteDuration, val durableQueueRetryAttempts: Int) {
|
||||
|
||||
def withDurableQueueRetryAttempts(newDurableQueueRetryAttempts: Int): Settings =
|
||||
copy(durableQueueRetryAttempts = newDurableQueueRetryAttempts)
|
||||
|
||||
/**
|
||||
* Scala API
|
||||
*/
|
||||
def withDurableQueueRequestTimeout(newDurableQueueRequestTimeout: FiniteDuration): Settings =
|
||||
copy(durableQueueRequestTimeout = newDurableQueueRequestTimeout)
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def withDurableQueueRequestTimeout(newDurableQueueRequestTimeout: JavaDuration): Settings =
|
||||
copy(durableQueueRequestTimeout = newDurableQueueRequestTimeout.asScala)
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def getDurableQueueRequestTimeout(): JavaDuration =
|
||||
durableQueueRequestTimeout.asJava
|
||||
|
||||
/**
|
||||
* Private copy method for internal use only.
|
||||
*/
|
||||
private def copy(
|
||||
durableQueueRequestTimeout: FiniteDuration = durableQueueRequestTimeout,
|
||||
durableQueueRetryAttempts: Int = durableQueueRetryAttempts) =
|
||||
new Settings(durableQueueRequestTimeout, durableQueueRetryAttempts)
|
||||
|
||||
override def toString: String =
|
||||
s"Settings($durableQueueRequestTimeout, $durableQueueRetryAttempts)"
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]]): Behavior[Command[A]] = {
|
||||
Behaviors.setup { context =>
|
||||
ProducerControllerImpl(producerId, durableQueueBehavior, ProducerController.Settings(context.system))
|
||||
}
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
ProducerControllerImpl(producerId, durableQueueBehavior, settings)
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* For custom `send` function. For example used with Sharding where the message must be wrapped in
|
||||
* `ShardingEnvelope(SequencedMessage(msg))`.
|
||||
*
|
||||
* When this factory is used the [[RegisterConsumer]] is not needed.
|
||||
*
|
||||
* In the future we may make the custom `send` in `ProducerController` public to make it possible to
|
||||
* wrap it or send it in other ways when building higher level abstractions that are using the `ProducerController`.
|
||||
* That is used by `ShardingProducerController`.
|
||||
*/
|
||||
@InternalApi private[akka] def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings,
|
||||
send: ConsumerController.SequencedMessage[A] => Unit): Behavior[Command[A]] = {
|
||||
ProducerControllerImpl(producerId, durableQueueBehavior, settings, send)
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](
|
||||
messageClass: Class[A],
|
||||
producerId: String,
|
||||
durableQueueBehavior: Optional[Behavior[DurableProducerQueue.Command[A]]]): Behavior[Command[A]] = {
|
||||
apply(producerId, durableQueueBehavior.asScala)(ClassTag(messageClass))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](
|
||||
messageClass: Class[A],
|
||||
producerId: String,
|
||||
durableQueueBehavior: Optional[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
apply(producerId, durableQueueBehavior.asScala, settings)(ClassTag(messageClass))
|
||||
}
|
||||
|
||||
}
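And a minimal producer-side sketch of the same flow (the `JobProducer` name and the `String` payload are illustrative; it assumes the controllers have already been connected via `RegisterConsumer` or `RegisterToProducerController`):

import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.scaladsl.Behaviors

object JobProducer {
  sealed trait Command
  private final case class WrappedRequestNext(next: ProducerController.RequestNext[String]) extends Command

  def apply(producerController: ActorRef[ProducerController.Command[String]]): Behavior[Command] =
    Behaviors.setup { context =>
      val requestNextAdapter =
        context.messageAdapter[ProducerController.RequestNext[String]](WrappedRequestNext)
      producerController ! ProducerController.Start(requestNextAdapter)

      def next(n: Int): Behavior[Command] =
        Behaviors.receiveMessage { case WrappedRequestNext(requestNext) =>
          // exactly one message may be sent for each RequestNext
          requestNext.sendNextTo ! s"job-$n"
          next(n + 1)
        }

      next(1)
    }
}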
@ -0,0 +1,240 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery
|
||||
|
||||
import java.util.Optional
|
||||
|
||||
import scala.reflect.ClassTag
|
||||
import scala.compat.java8.OptionConverters._
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.ActorSystem
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.internal.WorkPullingProducerControllerImpl
|
||||
import akka.actor.typed.receptionist.ServiceKey
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.util.JavaDurationConverters._
|
||||
import com.typesafe.config.Config
|
||||
|
||||
/**
|
||||
* Work pulling is a pattern where several worker actors pull tasks at their own pace from
* a shared work manager, instead of the manager blindly pushing work to the workers
* without knowing their individual capacity and current availability.
|
||||
*
|
||||
* The `WorkPullingProducerController` can be used together with [[ConsumerController]] to
|
||||
* implement the work pulling pattern.
|
||||
*
|
||||
* One important property is that the order of the messages should not matter, because each
|
||||
* message is routed randomly to one of the workers with demand. In other words, two subsequent
|
||||
* messages may be routed to two different workers and processed independently of each other.
|
||||
*
|
||||
* A worker actor (consumer) and its `ConsumerController` are dynamically registered to the
* `WorkPullingProducerController` via a [[ServiceKey]]. It will register itself to the
* [[akka.actor.typed.receptionist.Receptionist]], and the `WorkPullingProducerController`
|
||||
* subscribes to the same key to find active workers. In this way workers can be dynamically
|
||||
* added or removed from any node in the cluster.
|
||||
*
|
||||
* The work manager (producer) actor will start the flow by sending a [[WorkPullingProducerController.Start]]
|
||||
* message to the `WorkPullingProducerController`. The `ActorRef` in the `Start` message is
|
||||
* typically constructed as a message adapter to map the [[WorkPullingProducerController.RequestNext]]
|
||||
* to the protocol of the producer actor.
|
||||
*
|
||||
* The `WorkPullingProducerController` sends `RequestNext` to the producer, which is then allowed
|
||||
* to send one message to the `WorkPullingProducerController` via the `sendNextTo` in the `RequestNext`.
|
||||
* Thereafter the producer will receive a new `RequestNext` when it's allowed to send one more message.
|
||||
* It will send a new `RequestNext` when there is demand from any worker.
* It's possible that all workers with demand are deregistered after the `RequestNext` is sent and before
* the actual message is sent to the `WorkPullingProducerController`. In that case the message is
|
||||
* buffered and will be delivered when a new worker is registered or when there is new demand.
|
||||
*
|
||||
* The producer and `WorkPullingProducerController` actors are supposed to be local so that these messages are
|
||||
* fast and not lost. This is enforced by a runtime check.
|
||||
*
|
||||
* Many unconfirmed messages can be in flight between the `WorkPullingProducerController` and each
|
||||
* `ConsumerController`. The flow control is driven by the consumer side, which means that the
|
||||
* `WorkPullingProducerController` will not send faster than the demand requested by the workers.
|
||||
*
|
||||
* Lost messages are detected, resent and deduplicated if needed. This is also driven by the consumer side,
|
||||
* which means that the `WorkPullingProducerController` will not push resends unless requested by the
|
||||
* `ConsumerController`.
|
||||
*
|
||||
* If a worker crashes or is stopped gracefully the unconfirmed messages for that worker will be
|
||||
* routed to other workers by the `WorkPullingProducerController`. This may result in some messages
* being processed more than once, by different workers.
|
||||
*
|
||||
* Until sent messages have been confirmed the `WorkPullingProducerController` keeps them in memory to be able to
|
||||
* resend them. If the JVM of the `WorkPullingProducerController` crashes those unconfirmed messages are lost.
|
||||
* To make sure the messages can be delivered also in that scenario the `WorkPullingProducerController` can be
|
||||
* used with a [[DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so
|
||||
* that they can be redelivered when the producer is started again. An implementation of the
|
||||
* `DurableProducerQueue` is provided by `EventSourcedProducerQueue` in `akka-persistence-typed`.
|
||||
*
|
||||
* Instead of using `tell` with the `sendNextTo` in the `RequestNext` the producer can use `context.ask`
|
||||
* with the `askNextTo` in the `RequestNext`. The difference is that a reply is sent back when the
|
||||
* message has been handled. If a `DurableProducerQueue` is used then the reply is sent when the message
|
||||
* has been stored successfully, but it might not have been processed by the consumer yet. Otherwise the
|
||||
* reply is sent after the consumer has processed and confirmed the message.
|
||||
*
|
||||
* It's also possible to use the `WorkPullingProducerController` and `ConsumerController` without resending
|
||||
* lost messages, but the flow control is still used. This can for example be useful when both consumer and
|
||||
* producer are known to be located in the same local `ActorSystem`. This can be more efficient since messages
|
||||
* don't have to be kept in memory in the `ProducerController` until they have been
|
||||
* confirmed, but the drawback is that lost messages will not be delivered. See configuration
|
||||
* `only-flow-control` of the `ConsumerController`.
|
||||
*
|
||||
* The `producerId` is used in logging and included as an MDC entry with key `"producerId"`. It's propagated
* to the `ConsumerController` and is useful for correlating log messages. It can be any `String` but it's
* recommended to use a unique identifier representing the producer.
|
||||
*/
|
||||
@ApiMayChange // TODO #28719 when removing ApiMayChange consider removing `case class` for some of the messages
|
||||
object WorkPullingProducerController {
|
||||
|
||||
import WorkPullingProducerControllerImpl.UnsealedInternalCommand
|
||||
|
||||
sealed trait Command[A] extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* Initial message from the producer actor. The `producer` is typically constructed
|
||||
* as a message adapter to map the [[RequestNext]] to the protocol of the producer actor.
|
||||
*
|
||||
* If the producer is restarted it should send a new `Start` message to the
|
||||
* `WorkPullingProducerController`.
|
||||
*/
|
||||
final case class Start[A](producer: ActorRef[RequestNext[A]]) extends Command[A]
|
||||
|
||||
/**
|
||||
* The `WorkPullingProducerController` sends `RequestNext` to the producer when it is allowed to send one
|
||||
* message via the `sendNextTo` or `askNextTo`. Note that only one message is allowed, and then
|
||||
* it must wait for the next `RequestNext` before sending one more message.
|
||||
*/
|
||||
final case class RequestNext[A](sendNextTo: ActorRef[A], askNextTo: ActorRef[MessageWithConfirmation[A]])
|
||||
|
||||
/**
|
||||
* Java API: The generic `Class` type for `WorkPullingProducerController.RequestNext` that can be used when
|
||||
* creating a `messageAdapter` for `Class<RequestNext<MessageType>>`.
|
||||
*/
|
||||
def requestNextClass[A](): Class[RequestNext[A]] = classOf[RequestNext[A]]
|
||||
|
||||
/**
|
||||
* For sending a confirmation message back to the producer when the message has been fully delivered, processed,
|
||||
* and confirmed by the consumer. Typically used with `context.ask` from the producer.
|
||||
*/
|
||||
final case class MessageWithConfirmation[A](message: A, replyTo: ActorRef[Done]) extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* Retrieve information about registered workers.
|
||||
*/
|
||||
final case class GetWorkerStats[A](replyTo: ActorRef[WorkerStats]) extends Command[A]
|
||||
|
||||
final case class WorkerStats(numberOfWorkers: Int)
|
||||
|
||||
object Settings {
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from config `akka.reliable-delivery.work-pulling.producer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def apply(system: ActorSystem[_]): Settings =
|
||||
apply(system.settings.config.getConfig("akka.reliable-delivery.work-pulling.producer-controller"))
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.work-pulling.producer-controller`.
|
||||
*/
|
||||
def apply(config: Config): Settings = {
|
||||
new Settings(
|
||||
bufferSize = config.getInt("buffer-size"),
|
||||
config.getDuration("internal-ask-timeout").asScala,
|
||||
ProducerController.Settings(config))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: Factory method from config `akka.reliable-delivery.work-pulling.producer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def create(system: ActorSystem[_]): Settings =
|
||||
apply(system)
|
||||
|
||||
/**
|
||||
* Java API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.work-pulling.producer-controller`.
|
||||
*/
|
||||
def create(config: Config): Settings =
|
||||
apply(config)
|
||||
}
|
||||
|
||||
final class Settings private (
|
||||
val bufferSize: Int,
|
||||
val internalAskTimeout: FiniteDuration,
|
||||
val producerControllerSettings: ProducerController.Settings) {
|
||||
|
||||
def withBufferSize(newBufferSize: Int): Settings =
|
||||
copy(bufferSize = newBufferSize)
|
||||
|
||||
def withInternalAskTimeout(newInternalAskTimeout: FiniteDuration): Settings =
|
||||
copy(internalAskTimeout = newInternalAskTimeout)
|
||||
|
||||
def withInternalAskTimeout(newInternalAskTimeout: java.time.Duration): Settings =
|
||||
copy(internalAskTimeout = newInternalAskTimeout.asScala)
|
||||
|
||||
def withProducerControllerSettings(newProducerControllerSettings: ProducerController.Settings): Settings =
|
||||
copy(producerControllerSettings = newProducerControllerSettings)
|
||||
|
||||
/**
|
||||
* Private copy method for internal use only.
|
||||
*/
|
||||
private def copy(
|
||||
bufferSize: Int = bufferSize,
|
||||
internalAskTimeout: FiniteDuration = internalAskTimeout,
|
||||
producerControllerSettings: ProducerController.Settings = producerControllerSettings) =
|
||||
new Settings(bufferSize, internalAskTimeout, producerControllerSettings)
|
||||
|
||||
override def toString: String =
|
||||
s"Settings($bufferSize,$internalAskTimeout,$producerControllerSettings)"
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
workerServiceKey: ServiceKey[ConsumerController.Command[A]],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]]): Behavior[Command[A]] = {
|
||||
Behaviors.setup { context =>
|
||||
WorkPullingProducerControllerImpl(producerId, workerServiceKey, durableQueueBehavior, Settings(context.system))
|
||||
}
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
workerServiceKey: ServiceKey[ConsumerController.Command[A]],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
WorkPullingProducerControllerImpl(producerId, workerServiceKey, durableQueueBehavior, settings)
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](
|
||||
messageClass: Class[A],
|
||||
producerId: String,
|
||||
workerServiceKey: ServiceKey[ConsumerController.Command[A]],
|
||||
durableQueueBehavior: Optional[Behavior[DurableProducerQueue.Command[A]]]): Behavior[Command[A]] = {
|
||||
apply(producerId, workerServiceKey, durableQueueBehavior.asScala)(ClassTag(messageClass))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](
|
||||
messageClass: Class[A],
|
||||
producerId: String,
|
||||
workerServiceKey: ServiceKey[ConsumerController.Command[A]],
|
||||
durableQueueBehavior: Optional[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
apply(producerId, workerServiceKey, durableQueueBehavior.asScala, settings)(ClassTag(messageClass))
|
||||
}
|
||||
}
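A minimal end-to-end sketch of the pattern described above, keeping worker and work manager in one `ActorSystem` for brevity (the `WorkPullingExample` object, the `job-service` key and the `"manager-1"` producerId are illustrative assumptions, not part of this PR):

import akka.actor.typed.Behavior
import akka.actor.typed.delivery.{ ConsumerController, WorkPullingProducerController }
import akka.actor.typed.receptionist.ServiceKey
import akka.actor.typed.scaladsl.Behaviors

object WorkPullingExample {
  val jobServiceKey: ServiceKey[ConsumerController.Command[String]] =
    ServiceKey[ConsumerController.Command[String]]("job-service")

  // worker side: the ConsumerController registers itself with the Receptionist under jobServiceKey
  def worker(): Behavior[ConsumerController.Delivery[String]] =
    Behaviors.setup { context =>
      val consumerController = context.spawn(ConsumerController[String](jobServiceKey), "consumerController")
      consumerController ! ConsumerController.Start(context.self)
      Behaviors.receiveMessage { delivery =>
        context.log.info("Worker processing {}", delivery.message)
        delivery.confirmTo ! ConsumerController.Confirmed
        Behaviors.same
      }
    }

  // work manager side: sends one job for every RequestNext from the WorkPullingProducerController
  def workManager(): Behavior[WorkPullingProducerController.RequestNext[String]] =
    Behaviors.setup { context =>
      val producerController = context.spawn(
        WorkPullingProducerController[String]("manager-1", jobServiceKey, durableQueueBehavior = None),
        "workPullingController")
      producerController ! WorkPullingProducerController.Start(context.self)

      def next(n: Int): Behavior[WorkPullingProducerController.RequestNext[String]] =
        Behaviors.receiveMessage { requestNext =>
          requestNext.sendNextTo ! s"job-$n"
          next(n + 1)
        }

      next(1)
    }
}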
@ -0,0 +1,586 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery.internal
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.PostStop
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.ConsumerController.DeliverThenStop
|
||||
import akka.actor.typed.delivery.ProducerController
|
||||
import akka.actor.typed.receptionist.Receptionist
|
||||
import akka.actor.typed.receptionist.ServiceKey
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
import akka.actor.typed.scaladsl.StashBuffer
|
||||
import akka.actor.typed.scaladsl.TimerScheduler
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* ==== Design notes ====
|
||||
*
|
||||
* The destination consumer will start the flow by sending an initial `Start` message
|
||||
* to the `ConsumerController`.
|
||||
*
|
||||
* The `ProducerController` sends the first message to the `ConsumerController` without waiting for
|
||||
* a `Request` from the `ConsumerController`. The main reason for this is that when used with
|
||||
* Cluster Sharding the first message will typically create the `ConsumerController`. It's
|
||||
* also a way to connect the ProducerController and ConsumerController in a dynamic way, for
|
||||
* example when the ProducerController is replaced.
|
||||
*
|
||||
* The `ConsumerController` sends [[ProducerControllerImpl.Request]] to the `ProducerController`
|
||||
* to specify it's ready to receive up to the requested sequence number.
|
||||
*
|
||||
* The `ConsumerController` sends the first `Request` when it receives the first `SequencedMessage`
|
||||
* and has received the `Start` message from the consumer.
|
||||
*
|
||||
* It sends a new `Request` when half of the requested window remains, but it also retries
|
||||
* the `Request` if no messages are received because that could be caused by lost messages.
|
||||
*
|
||||
* Apart from the first message the producer will not send more messages than requested.
|
||||
*
|
||||
* Received messages are wrapped in [[ConsumerController.Delivery]] when sent to the consumer,
|
||||
* which is supposed to reply with [[ConsumerController.Confirmed]] when it has processed the message.
|
||||
* The next message is not delivered until the previous one is confirmed.
* More messages from the producer that arrive while waiting for the confirmation are stashed by
* the `ConsumerController` and delivered when the previous message has been confirmed.
|
||||
*
|
||||
* In other words, the "request" protocol to the application producer and consumer is one-by-one, but
|
||||
* between the `ProducerController` and `ConsumerController` it's a window of messages in flight.
|
||||
*
|
||||
* The consumer and the `ConsumerController` are supposed to be local so that these messages are fast and not lost.
|
||||
*
|
||||
* If the `ConsumerController` receives a message with unexpected sequence number (not previous + 1)
|
||||
* it sends [[ProducerControllerImpl.Resend]] to the `ProducerController` and will ignore all messages until
|
||||
* the expected sequence number arrives.
|
||||
*/
|
||||
@InternalApi private[akka] object ConsumerControllerImpl {
|
||||
import ConsumerController.Command
|
||||
import ConsumerController.RegisterToProducerController
|
||||
import ConsumerController.SeqNr
|
||||
import ConsumerController.SequencedMessage
|
||||
import ConsumerController.Start
|
||||
|
||||
sealed trait InternalCommand
|
||||
|
||||
/** For commands defined in public ConsumerController */
|
||||
trait UnsealedInternalCommand extends InternalCommand
|
||||
|
||||
private final case object Retry extends InternalCommand
|
||||
|
||||
private final case class ConsumerTerminated(consumer: ActorRef[_]) extends InternalCommand
|
||||
|
||||
private final case class State[A](
|
||||
producerController: ActorRef[ProducerControllerImpl.InternalCommand],
|
||||
consumer: ActorRef[ConsumerController.Delivery[A]],
|
||||
receivedSeqNr: SeqNr,
|
||||
confirmedSeqNr: SeqNr,
|
||||
requestedSeqNr: SeqNr,
|
||||
registering: Option[ActorRef[ProducerController.Command[A]]],
|
||||
stopping: Boolean) {
|
||||
|
||||
def isNextExpected(seqMsg: SequencedMessage[A]): Boolean =
|
||||
seqMsg.seqNr == receivedSeqNr + 1
|
||||
|
||||
def isProducerChanged(seqMsg: SequencedMessage[A]): Boolean =
|
||||
seqMsg.producerController != producerController || receivedSeqNr == 0
|
||||
|
||||
def updatedRegistering(seqMsg: SequencedMessage[A]): Option[ActorRef[ProducerController.Command[A]]] = {
|
||||
registering match {
|
||||
case None => None
|
||||
case s @ Some(reg) => if (seqMsg.producerController == reg) None else s
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def apply[A](
|
||||
serviceKey: Option[ServiceKey[Command[A]]],
|
||||
settings: ConsumerController.Settings): Behavior[Command[A]] = {
|
||||
Behaviors
|
||||
.withStash[InternalCommand](settings.flowControlWindow) { stashBuffer =>
|
||||
Behaviors.setup { context =>
|
||||
Behaviors.withMdc(msg => mdcForMessage(msg)) {
|
||||
context.setLoggerName("akka.actor.typed.delivery.ConsumerController")
|
||||
serviceKey.foreach { key =>
|
||||
context.system.receptionist ! Receptionist.Register(key, context.self)
|
||||
}
|
||||
Behaviors.withTimers { timers =>
|
||||
// wait for the `Start` message from the consumer, SequencedMessage will be stashed
|
||||
def waitForStart(
|
||||
registering: Option[ActorRef[ProducerController.Command[A]]]): Behavior[InternalCommand] = {
|
||||
Behaviors.receiveMessagePartial {
|
||||
case reg: RegisterToProducerController[A] @unchecked =>
|
||||
reg.producerController ! ProducerController.RegisterConsumer(context.self)
|
||||
waitForStart(Some(reg.producerController))
|
||||
|
||||
case s: Start[A] @unchecked =>
|
||||
ConsumerControllerImpl.enforceLocalConsumer(s.deliverTo)
|
||||
context.watchWith(s.deliverTo, ConsumerTerminated(s.deliverTo))
|
||||
|
||||
val activeBehavior =
|
||||
new ConsumerControllerImpl[A](context, timers, stashBuffer, settings)
|
||||
.active(initialState(context, s, registering))
|
||||
context.log.debug("Received Start, unstash [{}] messages.", stashBuffer.size)
|
||||
stashBuffer.unstashAll(activeBehavior)
|
||||
|
||||
case seqMsg: SequencedMessage[A] @unchecked =>
|
||||
stashBuffer.stash(seqMsg)
|
||||
Behaviors.same
|
||||
|
||||
case d: DeliverThenStop[_] =>
|
||||
if (stashBuffer.isEmpty) {
|
||||
Behaviors.stopped
|
||||
} else {
|
||||
stashBuffer.stash(d)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
case Retry =>
|
||||
registering.foreach { reg =>
|
||||
context.log.debug("Retry sending RegisterConsumer to [{}].", reg)
|
||||
reg ! ProducerController.RegisterConsumer(context.self)
|
||||
}
|
||||
Behaviors.same
|
||||
|
||||
case ConsumerTerminated(c) =>
|
||||
context.log.debug("Consumer [{}] terminated.", c)
|
||||
Behaviors.stopped
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
timers.startTimerWithFixedDelay(Retry, Retry, settings.resendInterval)
|
||||
waitForStart(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
.narrow // expose Command, but not InternalCommand
|
||||
}
|
||||
|
||||
private def mdcForMessage(msg: InternalCommand): Map[String, String] = {
|
||||
msg match {
|
||||
case seqMsg: SequencedMessage[_] => Map("producerId" -> seqMsg.producerId)
|
||||
case _ => Map.empty
|
||||
}
|
||||
}
|
||||
|
||||
private def initialState[A](
|
||||
context: ActorContext[InternalCommand],
|
||||
start: Start[A],
|
||||
registering: Option[ActorRef[ProducerController.Command[A]]]): State[A] = {
|
||||
State(
|
||||
producerController = context.system.deadLetters,
|
||||
start.deliverTo,
|
||||
receivedSeqNr = 0,
|
||||
confirmedSeqNr = 0,
|
||||
requestedSeqNr = 0,
|
||||
registering,
|
||||
stopping = false)
|
||||
}
|
||||
|
||||
def enforceLocalConsumer(ref: ActorRef[_]): Unit = {
|
||||
if (ref.path.address.hasGlobalScope)
|
||||
throw new IllegalArgumentException(s"Consumer [$ref] should be local.")
|
||||
}
|
||||
}
|
||||
|
||||
private class ConsumerControllerImpl[A](
|
||||
context: ActorContext[ConsumerControllerImpl.InternalCommand],
|
||||
timers: TimerScheduler[ConsumerControllerImpl.InternalCommand],
|
||||
stashBuffer: StashBuffer[ConsumerControllerImpl.InternalCommand],
|
||||
settings: ConsumerController.Settings) {
|
||||
|
||||
import ConsumerController.Confirmed
|
||||
import ConsumerController.Delivery
|
||||
import ConsumerController.RegisterToProducerController
|
||||
import ConsumerController.SequencedMessage
|
||||
import ConsumerController.Start
|
||||
import ConsumerControllerImpl._
|
||||
import ProducerControllerImpl.Ack
|
||||
import ProducerControllerImpl.Request
|
||||
import ProducerControllerImpl.Resend
|
||||
import settings.flowControlWindow
|
||||
|
||||
startRetryTimer()
|
||||
|
||||
private def resendLost = !settings.onlyFlowControl
|
||||
|
||||
// Expecting a SequencedMessage from ProducerController, that will be delivered to the consumer if
|
||||
// the seqNr is right.
|
||||
private def active(s: State[A]): Behavior[InternalCommand] = {
|
||||
Behaviors
|
||||
.receiveMessage[InternalCommand] {
|
||||
case seqMsg: SequencedMessage[A] =>
|
||||
val pid = seqMsg.producerId
|
||||
val seqNr = seqMsg.seqNr
|
||||
val expectedSeqNr = s.receivedSeqNr + 1
|
||||
|
||||
if (s.isProducerChanged(seqMsg)) {
|
||||
if (seqMsg.first)
|
||||
context.log.trace("Received first SequencedMessage seqNr [{}], delivering to consumer.", seqNr)
|
||||
receiveChangedProducer(s, seqMsg)
|
||||
} else if (s.registering.isDefined) {
|
||||
context.log.debug(
|
||||
"Received SequencedMessage seqNr [{}], discarding message because registering to new ProducerController.",
|
||||
seqNr)
|
||||
Behaviors.same
|
||||
} else if (s.isNextExpected(seqMsg)) {
|
||||
context.log.trace("Received SequencedMessage seqNr [{}], delivering to consumer.", seqNr)
|
||||
deliver(s.copy(receivedSeqNr = seqNr), seqMsg)
|
||||
} else if (seqNr > expectedSeqNr) {
|
||||
context.log.debugN(
|
||||
"Received SequencedMessage seqNr [{}], but expected [{}], {}.",
|
||||
seqNr,
|
||||
expectedSeqNr,
|
||||
if (resendLost) "requesting resend from expected seqNr" else "delivering to consumer anyway")
|
||||
if (resendLost) {
|
||||
seqMsg.producerController ! Resend(fromSeqNr = expectedSeqNr)
|
||||
resending(s)
|
||||
} else {
|
||||
s.consumer ! Delivery(seqMsg.message, context.self, pid, seqNr)
|
||||
waitingForConfirmation(s.copy(receivedSeqNr = seqNr), seqMsg)
|
||||
}
|
||||
} else { // seqNr < expectedSeqNr
|
||||
context.log.debug2("Received duplicate SequencedMessage seqNr [{}], expected [{}].", seqNr, expectedSeqNr)
|
||||
if (seqMsg.first)
|
||||
active(retryRequest(s))
|
||||
else
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
case Retry =>
|
||||
receiveRetry(s, () => active(retryRequest(s)))
|
||||
|
||||
case Confirmed =>
|
||||
receiveUnexpectedConfirmed()
|
||||
|
||||
case start: Start[A] =>
|
||||
receiveStart(s, start, newState => active(newState))
|
||||
|
||||
case ConsumerTerminated(c) =>
|
||||
receiveConsumerTerminated(c)
|
||||
|
||||
case reg: RegisterToProducerController[A] =>
|
||||
receiveRegisterToProducerController(s, reg, newState => active(newState))
|
||||
|
||||
case _: DeliverThenStop[_] =>
|
||||
receiveDeliverThenStop(s, newState => active(newState))
|
||||
|
||||
case _: UnsealedInternalCommand =>
|
||||
Behaviors.unhandled
|
||||
}
|
||||
.receiveSignal {
|
||||
case (_, PostStop) => postStop(s)
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveChangedProducer(s: State[A], seqMsg: SequencedMessage[A]): Behavior[InternalCommand] = {
|
||||
val seqNr = seqMsg.seqNr
|
||||
|
||||
if (seqMsg.first || !resendLost) {
|
||||
logChangedProducer(s, seqMsg)
|
||||
|
||||
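// Window arithmetic, e.g. with flowControlWindow = 20 and the first message at seqNr 1:
// requestUpToSeqNr = 1 - 1 + 20 = 20, i.e. the producer may send seqNr 1 to 20 before it
// needs a new Request (illustrative numbers).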
val newRequestedSeqNr = seqMsg.seqNr - 1 + flowControlWindow
|
||||
context.log.debug("Sending Request with requestUpToSeqNr [{}] after first SequencedMessage.", newRequestedSeqNr)
|
||||
seqMsg.producerController ! Request(confirmedSeqNr = 0L, newRequestedSeqNr, resendLost, viaTimeout = false)
|
||||
|
||||
deliver(
|
||||
s.copy(
|
||||
producerController = seqMsg.producerController,
|
||||
receivedSeqNr = seqNr,
|
||||
confirmedSeqNr = 0L,
|
||||
requestedSeqNr = newRequestedSeqNr,
|
||||
registering = s.updatedRegistering(seqMsg)),
|
||||
seqMsg)
|
||||
} else if (s.receivedSeqNr == 0) {
|
||||
// needed for sharding
|
||||
context.log.debug(
"Received SequencedMessage seqNr [{}] from new producer [{}], but it wasn't the first. Resending.",
seqNr,
seqMsg.producerController)
|
||||
// request resend of all unconfirmed, and mark first
|
||||
seqMsg.producerController ! Resend(0)
|
||||
resending(s)
|
||||
} else {
|
||||
context.log.warnN(
|
||||
"Received SequencedMessage seqNr [{}], discarding message because it was from unexpected " +
|
||||
"producer [{}] when expecting [{}].",
|
||||
seqNr,
|
||||
seqMsg.producerController,
|
||||
s.producerController)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private def logChangedProducer(s: State[A], seqMsg: SequencedMessage[A]): Unit = {
|
||||
if (s.producerController == context.system.deadLetters) {
|
||||
context.log.debugN(
|
||||
"Associated with new ProducerController [{}], seqNr [{}].",
|
||||
seqMsg.producerController,
|
||||
seqMsg.seqNr)
|
||||
} else {
|
||||
context.log.debugN(
|
||||
"Changing ProducerController from [{}] to [{}], seqNr [{}].",
|
||||
s.producerController,
|
||||
seqMsg.producerController,
|
||||
seqMsg.seqNr)
|
||||
}
|
||||
}
|
||||
|
||||
// It has detected a missing seqNr and requested a Resend. Expecting a SequencedMessage from the
|
||||
// ProducerController with the missing seqNr. Other SequencedMessage with different seqNr will be
|
||||
// discarded since they were in flight before the Resend request and will anyway be sent again.
|
||||
private def resending(s: State[A]): Behavior[InternalCommand] = {
|
||||
Behaviors
|
||||
.receiveMessage[InternalCommand] {
|
||||
case seqMsg: SequencedMessage[A] =>
|
||||
val seqNr = seqMsg.seqNr
|
||||
|
||||
if (s.isProducerChanged(seqMsg)) {
|
||||
if (seqMsg.first)
|
||||
context.log.trace("Received first SequencedMessage seqNr [{}], delivering to consumer.", seqNr)
|
||||
receiveChangedProducer(s, seqMsg)
|
||||
} else if (s.registering.isDefined) {
|
||||
context.log.debug(
|
||||
"Received SequencedMessage seqNr [{}], discarding message because registering to new ProducerController.",
|
||||
seqNr)
|
||||
Behaviors.same
|
||||
} else if (s.isNextExpected(seqMsg)) {
|
||||
context.log.debug("Received missing SequencedMessage seqNr [{}].", seqNr)
|
||||
deliver(s.copy(receivedSeqNr = seqNr), seqMsg)
|
||||
} else {
|
||||
context.log.debug2(
|
||||
"Received SequencedMessage seqNr [{}], discarding message because waiting for [{}].",
|
||||
seqNr,
|
||||
s.receivedSeqNr + 1)
|
||||
if (seqMsg.first)
|
||||
retryRequest(s)
|
||||
Behaviors.same // ignore until we receive the expected
|
||||
}
|
||||
|
||||
case Retry =>
|
||||
receiveRetry(
|
||||
s,
|
||||
() => {
|
||||
// in case the Resend message was lost
|
||||
context.log.debug("Retry sending Resend [{}].", s.receivedSeqNr + 1)
|
||||
s.producerController ! Resend(fromSeqNr = s.receivedSeqNr + 1)
|
||||
Behaviors.same
|
||||
})
|
||||
|
||||
case Confirmed =>
|
||||
receiveUnexpectedConfirmed()
|
||||
|
||||
case start: Start[A] =>
|
||||
receiveStart(s, start, newState => resending(newState))
|
||||
|
||||
case ConsumerTerminated(c) =>
|
||||
receiveConsumerTerminated(c)
|
||||
|
||||
case reg: RegisterToProducerController[A] =>
|
||||
receiveRegisterToProducerController(s, reg, newState => active(newState))
|
||||
|
||||
case _: DeliverThenStop[_] =>
|
||||
receiveDeliverThenStop(s, newState => resending(newState))
|
||||
|
||||
case _: UnsealedInternalCommand =>
|
||||
Behaviors.unhandled
|
||||
}
|
||||
.receiveSignal {
|
||||
case (_, PostStop) => postStop(s)
|
||||
}
|
||||
}
|
||||
|
||||
private def deliver(s: State[A], seqMsg: SequencedMessage[A]): Behavior[InternalCommand] = {
|
||||
s.consumer ! Delivery(seqMsg.message, context.self, seqMsg.producerId, seqMsg.seqNr)
|
||||
waitingForConfirmation(s, seqMsg)
|
||||
}
|
||||
|
||||
// The message has been delivered to the consumer and it is now waiting for Confirmed from
|
||||
// the consumer. New SequencedMessage from the ProducerController will be stashed.
|
||||
private def waitingForConfirmation(s: State[A], seqMsg: SequencedMessage[A]): Behavior[InternalCommand] = {
|
||||
Behaviors
|
||||
.receiveMessage[InternalCommand] {
|
||||
case Confirmed =>
|
||||
val seqNr = seqMsg.seqNr
|
||||
context.log.trace("Received Confirmed seqNr [{}] from consumer, stashed size [{}].", seqNr, stashBuffer.size)
|
||||
|
||||
val newRequestedSeqNr =
|
||||
if (seqMsg.first) {
|
||||
// confirm the first message immediately to cancel resending of first
|
||||
val newRequestedSeqNr = seqNr - 1 + flowControlWindow
|
||||
context.log.debug(
|
||||
"Sending Request after first with confirmedSeqNr [{}], requestUpToSeqNr [{}].",
|
||||
seqNr,
|
||||
newRequestedSeqNr)
|
||||
s.producerController ! Request(confirmedSeqNr = seqNr, newRequestedSeqNr, resendLost, viaTimeout = false)
|
||||
newRequestedSeqNr
|
||||
} else if ((s.requestedSeqNr - seqNr) == flowControlWindow / 2) {
|
||||
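// Half-window refill, e.g. with flowControlWindow = 20: when the remaining credit
// (requestedSeqNr - confirmed seqNr) drops to 10, grant another 10 so the producer is not
// starved while confirmations are still in flight (illustrative numbers).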
val newRequestedSeqNr = s.requestedSeqNr + flowControlWindow / 2
|
||||
context.log.debug(
|
||||
"Sending Request with confirmedSeqNr [{}], requestUpToSeqNr [{}].",
|
||||
seqNr,
|
||||
newRequestedSeqNr)
|
||||
s.producerController ! Request(confirmedSeqNr = seqNr, newRequestedSeqNr, resendLost, viaTimeout = false)
|
||||
startRetryTimer() // reset interval since Request was just sent
|
||||
newRequestedSeqNr
|
||||
} else {
|
||||
if (seqMsg.ack) {
|
||||
context.log.trace("Sending Ack seqNr [{}].", seqNr)
|
||||
s.producerController ! Ack(confirmedSeqNr = seqNr)
|
||||
}
|
||||
s.requestedSeqNr
|
||||
}
|
||||
|
||||
if (s.stopping && stashBuffer.isEmpty) {
|
||||
context.log.debug("Stopped at seqNr [{}], after delivery of buffered messages.", seqNr)
|
||||
Behaviors.stopped { () =>
|
||||
// best effort to Ack latest confirmed when stopping
|
||||
s.producerController ! Ack(seqNr)
|
||||
}
|
||||
} else {
|
||||
// FIXME #28718 can we use unstashOne instead of all?
|
||||
stashBuffer.unstashAll(active(s.copy(confirmedSeqNr = seqNr, requestedSeqNr = newRequestedSeqNr)))
|
||||
}
|
||||
|
||||
case msg: SequencedMessage[A] =>
|
||||
if (msg.seqNr == seqMsg.seqNr && msg.producerController == seqMsg.producerController) {
|
||||
context.log.debug("Received duplicate SequencedMessage seqNr [{}].", msg.seqNr)
|
||||
} else if (stashBuffer.isFull) {
|
||||
// possible that the stash is full if ProducerController resends unconfirmed (duplicates)
|
||||
// dropping them since they can be resent
|
||||
context.log.debug(
|
||||
"Received SequencedMessage seqNr [{}], discarding message because stash is full.",
|
||||
msg.seqNr)
|
||||
} else {
|
||||
context.log.trace(
|
||||
"Received SequencedMessage seqNr [{}], stashing while waiting for consumer to confirm [{}].",
|
||||
msg.seqNr,
|
||||
seqMsg.seqNr)
|
||||
stashBuffer.stash(msg)
|
||||
}
|
||||
Behaviors.same
|
||||
|
||||
case Retry =>
|
||||
receiveRetry(s, () => waitingForConfirmation(retryRequest(s), seqMsg))
|
||||
|
||||
case start: Start[A] =>
|
||||
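// A restarted consumer sends Start again; redeliver the in-flight message to the new
// deliverTo so that its Confirmed is not lost.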
start.deliverTo ! Delivery(seqMsg.message, context.self, seqMsg.producerId, seqMsg.seqNr)
|
||||
receiveStart(s, start, newState => waitingForConfirmation(newState, seqMsg))
|
||||
|
||||
case ConsumerTerminated(c) =>
|
||||
receiveConsumerTerminated(c)
|
||||
|
||||
case reg: RegisterToProducerController[A] =>
|
||||
receiveRegisterToProducerController(s, reg, newState => waitingForConfirmation(newState, seqMsg))
|
||||
|
||||
case _: DeliverThenStop[_] =>
|
||||
receiveDeliverThenStop(s, newState => waitingForConfirmation(newState, seqMsg))
|
||||
|
||||
case _: UnsealedInternalCommand =>
|
||||
Behaviors.unhandled
|
||||
}
|
||||
.receiveSignal {
|
||||
case (_, PostStop) => postStop(s)
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveRetry(s: State[A], nextBehavior: () => Behavior[InternalCommand]): Behavior[InternalCommand] = {
|
||||
s.registering match {
|
||||
case None => nextBehavior()
|
||||
case Some(reg) =>
|
||||
reg ! ProducerController.RegisterConsumer(context.self)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveStart(
|
||||
s: State[A],
|
||||
start: Start[A],
|
||||
nextBehavior: State[A] => Behavior[InternalCommand]): Behavior[InternalCommand] = {
|
||||
ConsumerControllerImpl.enforceLocalConsumer(start.deliverTo)
|
||||
if (start.deliverTo == s.consumer) {
|
||||
nextBehavior(s)
|
||||
} else {
|
||||
// if consumer is restarted it may send Start again
|
||||
context.unwatch(s.consumer)
|
||||
context.watchWith(start.deliverTo, ConsumerTerminated(start.deliverTo))
|
||||
nextBehavior(s.copy(consumer = start.deliverTo))
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveRegisterToProducerController(
|
||||
s: State[A],
|
||||
reg: RegisterToProducerController[A],
|
||||
nextBehavior: State[A] => Behavior[InternalCommand]): Behavior[InternalCommand] = {
|
||||
if (reg.producerController != s.producerController) {
|
||||
context.log.debug2(
|
||||
"Register to new ProducerController [{}], previous was [{}].",
|
||||
reg.producerController,
|
||||
s.producerController)
|
||||
reg.producerController ! ProducerController.RegisterConsumer(context.self)
|
||||
nextBehavior(s.copy(registering = Some(reg.producerController)))
|
||||
} else {
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveDeliverThenStop(
|
||||
s: State[A],
|
||||
nextBehavior: State[A] => Behavior[InternalCommand]): Behavior[InternalCommand] = {
|
||||
if (stashBuffer.isEmpty && s.receivedSeqNr == s.confirmedSeqNr) {
|
||||
context.log.debug("Stopped at seqNr [{}], no buffered messages.", s.confirmedSeqNr)
|
||||
Behaviors.stopped
|
||||
} else {
|
||||
nextBehavior(s.copy(stopping = true))
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveConsumerTerminated(c: ActorRef[_]): Behavior[InternalCommand] = {
|
||||
context.log.debug("Consumer [{}] terminated.", c)
|
||||
Behaviors.stopped
|
||||
}
|
||||
|
||||
private def receiveUnexpectedConfirmed(): Behavior[InternalCommand] = {
|
||||
context.log.warn("Received unexpected Confirmed from consumer.")
|
||||
Behaviors.unhandled
|
||||
}
|
||||
|
||||
private def startRetryTimer(): Unit = {
|
||||
timers.startTimerWithFixedDelay(Retry, Retry, settings.resendInterval)
|
||||
}
|
||||
|
||||
// in case the Request or the SequencedMessage triggering the Request is lost
|
||||
private def retryRequest(s: State[A]): State[A] = {
|
||||
if (s.producerController == context.system.deadLetters) {
|
||||
s
|
||||
} else {
|
||||
// TODO #28720 Maybe try to adjust the retry frequency, e.g. some exponential backoff and less need for it when
// SequencedMessages are arriving. On the other hand it might be too much overhead to reschedule for each
// incoming SequencedMessage.
|
||||
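// With resendLost the previously granted window is simply repeated; in flow-control-only mode
// lost messages are never redelivered, so the window is moved forward from what has actually
// been received.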
val newRequestedSeqNr = if (resendLost) s.requestedSeqNr else s.receivedSeqNr + flowControlWindow / 2
|
||||
context.log.debug(
|
||||
"Retry sending Request with confirmedSeqNr [{}], requestUpToSeqNr [{}].",
|
||||
s.confirmedSeqNr,
|
||||
newRequestedSeqNr)
|
||||
// TODO #28720 maybe watch the producer to avoid sending retry Request to dead producer
|
||||
s.producerController ! Request(s.confirmedSeqNr, newRequestedSeqNr, resendLost, viaTimeout = true)
|
||||
s.copy(requestedSeqNr = newRequestedSeqNr)
|
||||
}
|
||||
}
|
||||
|
||||
private def postStop(s: State[A]): Behavior[InternalCommand] = {
|
||||
// best effort to Ack latest confirmed when stopping
|
||||
s.producerController ! Ack(s.confirmedSeqNr)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
}
@@ -0,0 +1,12 @@
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery.internal
|
||||
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] trait DeliverySerializable
|
@@ -0,0 +1,628 @@
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery.internal
|
||||
|
||||
import java.util.concurrent.TimeoutException
|
||||
|
||||
import scala.concurrent.duration._
|
||||
import scala.reflect.ClassTag
|
||||
import scala.util.Failure
|
||||
import scala.util.Success
|
||||
|
||||
import akka.actor.DeadLetterSuppression
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.ConsumerController.SequencedMessage
|
||||
import akka.actor.typed.delivery.DurableProducerQueue
|
||||
import akka.actor.typed.delivery.ProducerController
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
import akka.actor.typed.scaladsl.TimerScheduler
|
||||
import akka.util.Timeout
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* ==== Design notes ====
|
||||
*
|
||||
* The producer will start the flow by sending a [[ProducerController.Start]] message to the `ProducerController` with
* a message adapter reference to convert the [[ProducerController.RequestNext]] message.
|
||||
* The `ProducerController` sends `RequestNext` to the producer, which is then allowed to send one message to
|
||||
* the `ProducerController`.
|
||||
*
|
||||
* The producer and `ProducerController` are supposed to be local so that these messages are fast and not lost.
|
||||
*
|
||||
* The `ProducerController` sends the first message to the `ConsumerController` without waiting for
|
||||
* a `Request` from the `ConsumerController`. The main reason for this is that when used with
|
||||
* Cluster Sharding the first message will typically create the `ConsumerController`. It's
|
||||
* also a way to connect the ProducerController and ConsumerController in a dynamic way, for
|
||||
* example when the ProducerController is replaced.
|
||||
*
|
||||
* When the first message is received by the `ConsumerController` it sends back the initial `Request`,
|
||||
* with demand of how many messages it can accept.
|
||||
*
|
||||
* Apart from the first message the `ProducerController` will not send more messages than requested
|
||||
* by the `ConsumerController`.
|
||||
*
|
||||
* When there is demand from the consumer side the `ProducerController` sends `RequestNext` to the
|
||||
* actual producer, which is then allowed to send one more message.
|
||||
*
|
||||
* Each message is wrapped by the `ProducerController` in [[ConsumerController.SequencedMessage]] with
|
||||
* a monotonically increasing sequence number without gaps, starting at 1.
|
||||
*
|
||||
* In other words, the "request" protocol to the application producer and consumer is one-by-one, but
* between the `ProducerController` and `ConsumerController` it's a window of messages in flight.
|
||||
*
|
||||
* The `Request` message also contains a `confirmedSeqNr` that is the acknowledgement
|
||||
* from the consumer that it has received and processed all messages up to that sequence number.
|
||||
*
|
||||
* The `ConsumerController` will send [[ProducerControllerImpl.Resend]] if a lost message is detected
|
||||
* and then the `ProducerController` will resend all messages from that sequence number. The producer keeps
|
||||
* unconfirmed messages in a buffer to be able to resend them. The buffer size is limited
|
||||
* by the request window size.
|
||||
*
|
||||
* The resending is optional, and the `ConsumerController` can be started with `resendLost=false`
|
||||
* to ignore lost messages, and then the `ProducerController` will not buffer unconfirmed messages.
|
||||
* In that mode it provides only flow control but no reliable delivery.
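*
* A rough sketch of the exchange with a flow-control window of 20 (illustrative seqNr values,
* not taken from a real run):
* {{{
* producer           --> ProducerController : msg (after RequestNext)
* ProducerController --> ConsumerController : SequencedMessage(seqNr = 1, first = true)
* ConsumerController --> consumer           : Delivery(seqNr = 1)
* consumer           --> ConsumerController : Confirmed
* ConsumerController --> ProducerController : Request(confirmedSeqNr = 1, requestUpToSeqNr = 20)
* ProducerController --> producer           : RequestNext
* }}}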
|
||||
*/
|
||||
object ProducerControllerImpl {
|
||||
|
||||
import ProducerController.Command
|
||||
import ProducerController.RegisterConsumer
|
||||
import ProducerController.RequestNext
|
||||
import ProducerController.SeqNr
|
||||
import ProducerController.Start
|
||||
|
||||
sealed trait InternalCommand
|
||||
|
||||
/** For commands defined in public ProducerController */
|
||||
trait UnsealedInternalCommand extends InternalCommand
|
||||
|
||||
final case class Request(confirmedSeqNr: SeqNr, requestUpToSeqNr: SeqNr, supportResend: Boolean, viaTimeout: Boolean)
|
||||
extends InternalCommand
|
||||
with DeliverySerializable
|
||||
with DeadLetterSuppression {
|
||||
require(
|
||||
confirmedSeqNr <= requestUpToSeqNr,
|
||||
s"confirmedSeqNr [$confirmedSeqNr] should be <= requestUpToSeqNr [$requestUpToSeqNr]")
|
||||
}
|
||||
final case class Resend(fromSeqNr: SeqNr) extends InternalCommand with DeliverySerializable with DeadLetterSuppression
|
||||
final case class Ack(confirmedSeqNr: SeqNr)
|
||||
extends InternalCommand
|
||||
with DeliverySerializable
|
||||
with DeadLetterSuppression
|
||||
|
||||
private case class Msg[A](msg: A) extends InternalCommand
|
||||
private case object ResendFirst extends InternalCommand
|
||||
case object ResendFirstUnconfirmed extends InternalCommand
|
||||
|
||||
private case class LoadStateReply[A](state: DurableProducerQueue.State[A]) extends InternalCommand
|
||||
private case class LoadStateFailed(attempt: Int) extends InternalCommand
|
||||
private case class StoreMessageSentReply(ack: DurableProducerQueue.StoreMessageSentAck)
|
||||
private case class StoreMessageSentFailed[A](messageSent: DurableProducerQueue.MessageSent[A], attempt: Int)
|
||||
extends InternalCommand
|
||||
private case object DurableQueueTerminated extends InternalCommand
|
||||
|
||||
private case class StoreMessageSentCompleted[A](messageSent: DurableProducerQueue.MessageSent[A])
|
||||
extends InternalCommand
|
||||
|
||||
private final case class State[A](
|
||||
requested: Boolean,
|
||||
currentSeqNr: SeqNr,
|
||||
confirmedSeqNr: SeqNr,
|
||||
requestedSeqNr: SeqNr,
|
||||
replyAfterStore: Map[SeqNr, ActorRef[SeqNr]],
|
||||
supportResend: Boolean,
|
||||
unconfirmed: Vector[ConsumerController.SequencedMessage[A]],
|
||||
firstSeqNr: SeqNr,
|
||||
producer: ActorRef[ProducerController.RequestNext[A]],
|
||||
send: ConsumerController.SequencedMessage[A] => Unit)
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings): Behavior[Command[A]] = {
|
||||
Behaviors
|
||||
.setup[InternalCommand] { context =>
|
||||
Behaviors.withMdc(staticMdc = Map("producerId" -> producerId)) {
|
||||
context.setLoggerName("akka.actor.typed.delivery.ProducerController")
|
||||
val durableQueue = askLoadState(context, durableQueueBehavior, settings)
|
||||
waitingForInitialization[A](
|
||||
context,
|
||||
None,
|
||||
None,
|
||||
durableQueue,
|
||||
settings,
|
||||
createInitialState(durableQueue.nonEmpty)) { (producer, consumerController, loadedState) =>
|
||||
val send: ConsumerController.SequencedMessage[A] => Unit = consumerController ! _
|
||||
becomeActive(
|
||||
producerId,
|
||||
durableQueue,
|
||||
settings,
|
||||
createState(context.self, producerId, send, producer, loadedState))
|
||||
}
|
||||
}
|
||||
}
|
||||
.narrow
|
||||
}
|
||||
|
||||
/**
|
||||
* For custom `send` function. For example used with Sharding where the message must be wrapped in
|
||||
* `ShardingEnvelope(SequencedMessage(msg))`.
|
||||
*/
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings,
|
||||
send: ConsumerController.SequencedMessage[A] => Unit): Behavior[Command[A]] = {
|
||||
Behaviors
|
||||
.setup[InternalCommand] { context =>
|
||||
Behaviors.withMdc(staticMdc = Map("producerId" -> producerId)) {
|
||||
context.setLoggerName("akka.actor.typed.delivery.ProducerController")
|
||||
val durableQueue = askLoadState(context, durableQueueBehavior, settings)
|
||||
// ConsumerController not used here
|
||||
waitingForInitialization[A](
|
||||
context,
|
||||
None,
|
||||
consumerController = Some(context.system.deadLetters),
|
||||
durableQueue,
|
||||
settings,
|
||||
createInitialState(durableQueue.nonEmpty)) { (producer, _, loadedState) =>
|
||||
becomeActive(
|
||||
producerId,
|
||||
durableQueue,
|
||||
settings,
|
||||
createState(context.self, producerId, send, producer, loadedState))
|
||||
}
|
||||
}
|
||||
}
|
||||
.narrow
|
||||
}
|
||||
|
||||
private def askLoadState[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings): Option[ActorRef[DurableProducerQueue.Command[A]]] = {
|
||||
|
||||
durableQueueBehavior.map { b =>
|
||||
val ref = context.spawn(b, "durable")
|
||||
context.watchWith(ref, DurableQueueTerminated)
|
||||
askLoadState(context, Some(ref), settings, attempt = 1)
|
||||
ref
|
||||
}
|
||||
}
|
||||
|
||||
private def askLoadState[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings,
|
||||
attempt: Int): Unit = {
|
||||
implicit val loadTimeout: Timeout = settings.durableQueueRequestTimeout
|
||||
durableQueue.foreach { ref =>
|
||||
context.ask[DurableProducerQueue.LoadState[A], DurableProducerQueue.State[A]](
|
||||
ref,
|
||||
askReplyTo => DurableProducerQueue.LoadState[A](askReplyTo)) {
|
||||
case Success(s) => LoadStateReply(s)
|
||||
case Failure(_) => LoadStateFailed(attempt) // timeout
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def createInitialState[A: ClassTag](hasDurableQueue: Boolean) = {
|
||||
if (hasDurableQueue) None else Some(DurableProducerQueue.State.empty[A])
|
||||
}
|
||||
|
||||
private def createState[A: ClassTag](
|
||||
self: ActorRef[InternalCommand],
|
||||
producerId: String,
|
||||
send: SequencedMessage[A] => Unit,
|
||||
producer: ActorRef[RequestNext[A]],
|
||||
loadedState: DurableProducerQueue.State[A]): State[A] = {
|
||||
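// Rebuild SequencedMessage envelopes for stored-but-unconfirmed messages; the first replayed
// one is flagged as `first` so the ConsumerController treats it as a (re)connect.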
val unconfirmed = loadedState.unconfirmed.toVector.zipWithIndex.map {
|
||||
case (u, i) => SequencedMessage[A](producerId, u.seqNr, u.message, i == 0, u.ack)(self)
|
||||
}
|
||||
State(
|
||||
requested = false,
|
||||
currentSeqNr = loadedState.currentSeqNr,
|
||||
confirmedSeqNr = loadedState.highestConfirmedSeqNr,
|
||||
requestedSeqNr = 1L,
|
||||
replyAfterStore = Map.empty,
|
||||
supportResend = true,
|
||||
unconfirmed = unconfirmed,
|
||||
firstSeqNr = loadedState.highestConfirmedSeqNr + 1,
|
||||
producer,
|
||||
send)
|
||||
}
|
||||
|
||||
private def waitingForInitialization[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
producer: Option[ActorRef[RequestNext[A]]],
|
||||
consumerController: Option[ActorRef[ConsumerController.Command[A]]],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings,
|
||||
initialState: Option[DurableProducerQueue.State[A]])(
|
||||
thenBecomeActive: (
|
||||
ActorRef[RequestNext[A]],
|
||||
ActorRef[ConsumerController.Command[A]],
|
||||
DurableProducerQueue.State[A]) => Behavior[InternalCommand]): Behavior[InternalCommand] = {
|
||||
Behaviors.receiveMessagePartial[InternalCommand] {
|
||||
case RegisterConsumer(c: ActorRef[ConsumerController.Command[A]] @unchecked) =>
|
||||
(producer, initialState) match {
|
||||
case (Some(p), Some(s)) => thenBecomeActive(p, c, s)
|
||||
case (_, _) =>
|
||||
waitingForInitialization(context, producer, Some(c), durableQueue, settings, initialState)(thenBecomeActive)
|
||||
}
|
||||
case start: Start[A] @unchecked =>
|
||||
(consumerController, initialState) match {
|
||||
case (Some(c), Some(s)) => thenBecomeActive(start.producer, c, s)
|
||||
case (_, _) =>
|
||||
waitingForInitialization(
|
||||
context,
|
||||
Some(start.producer),
|
||||
consumerController,
|
||||
durableQueue,
|
||||
settings,
|
||||
initialState)(thenBecomeActive)
|
||||
}
|
||||
case load: LoadStateReply[A] @unchecked =>
|
||||
(producer, consumerController) match {
|
||||
case (Some(p), Some(c)) => thenBecomeActive(p, c, load.state)
|
||||
case (_, _) =>
|
||||
waitingForInitialization(context, producer, consumerController, durableQueue, settings, Some(load.state))(
|
||||
thenBecomeActive)
|
||||
}
|
||||
case LoadStateFailed(attempt) =>
|
||||
if (attempt >= settings.durableQueueRetryAttempts) {
|
||||
val errorMessage = s"LoadState failed after [$attempt] attempts, giving up."
|
||||
context.log.error(errorMessage)
|
||||
throw new TimeoutException(errorMessage)
|
||||
} else {
|
||||
context.log.warn(
|
||||
"LoadState failed, attempt [{}] of [{}], retrying.",
|
||||
attempt,
|
||||
settings.durableQueueRetryAttempts)
|
||||
// retry
|
||||
askLoadState(context, durableQueue, settings, attempt + 1)
|
||||
Behaviors.same
|
||||
}
|
||||
case DurableQueueTerminated =>
|
||||
throw new IllegalStateException("DurableQueue was unexpectedly terminated.")
|
||||
}
|
||||
}
|
||||
|
||||
private def becomeActive[A: ClassTag](
|
||||
producerId: String,
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings,
|
||||
state: State[A]): Behavior[InternalCommand] = {
|
||||
|
||||
Behaviors.setup { context =>
|
||||
Behaviors.withTimers { timers =>
|
||||
val msgAdapter: ActorRef[A] = context.messageAdapter(msg => Msg(msg))
|
||||
val requested =
|
||||
if (state.unconfirmed.isEmpty) {
|
||||
state.producer ! RequestNext(producerId, 1L, 0L, msgAdapter, context.self)
|
||||
true
|
||||
} else {
|
||||
context.log.debug("Starting with [{}] unconfirmed.", state.unconfirmed.size)
|
||||
context.self ! ResendFirst
|
||||
false
|
||||
}
|
||||
new ProducerControllerImpl[A](context, producerId, durableQueue, settings, msgAdapter, timers)
|
||||
.active(state.copy(requested = requested))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def enforceLocalProducer(ref: ActorRef[_]): Unit = {
if (ref.path.address.hasGlobalScope)
throw new IllegalArgumentException(s"Producer [$ref] should be local.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private class ProducerControllerImpl[A: ClassTag](
|
||||
context: ActorContext[ProducerControllerImpl.InternalCommand],
|
||||
producerId: String,
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: ProducerController.Settings,
|
||||
msgAdapter: ActorRef[A],
|
||||
timers: TimerScheduler[ProducerControllerImpl.InternalCommand]) {
|
||||
import ConsumerController.SequencedMessage
|
||||
import DurableProducerQueue.MessageSent
|
||||
import DurableProducerQueue.NoQualifier
|
||||
import DurableProducerQueue.StoreMessageConfirmed
|
||||
import DurableProducerQueue.StoreMessageSent
|
||||
import DurableProducerQueue.StoreMessageSentAck
|
||||
import ProducerController.MessageWithConfirmation
|
||||
import ProducerController.RegisterConsumer
|
||||
import ProducerController.RequestNext
|
||||
import ProducerController.SeqNr
|
||||
import ProducerController.Start
|
||||
import ProducerControllerImpl._
|
||||
|
||||
// for the durableQueue StoreMessageSent ask
|
||||
private implicit val askTimeout: Timeout = settings.durableQueueRequestTimeout
|
||||
|
||||
private def active(s: State[A]): Behavior[InternalCommand] = {
|
||||
|
||||
def onMsg(m: A, newReplyAfterStore: Map[SeqNr, ActorRef[SeqNr]], ack: Boolean): Behavior[InternalCommand] = {
|
||||
checkOnMsgRequestedState()
|
||||
if (context.log.isTraceEnabled)
|
||||
context.log.trace("Sending [{}] with seqNr [{}].", m.getClass.getName, s.currentSeqNr)
|
||||
val seqMsg = SequencedMessage(producerId, s.currentSeqNr, m, s.currentSeqNr == s.firstSeqNr, ack)(context.self)
|
||||
val newUnconfirmed =
|
||||
if (s.supportResend) s.unconfirmed :+ seqMsg
|
||||
else Vector.empty // no resending, no need to keep unconfirmed
|
||||
|
||||
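// The first message also acts as the connection handshake, so keep resending it on a timer
// until the ConsumerController has confirmed it (the timer is cancelled in onAck / receiveResendFirst).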
if (s.currentSeqNr == s.firstSeqNr)
|
||||
timers.startTimerWithFixedDelay(ResendFirst, ResendFirst, 1.second)
|
||||
|
||||
s.send(seqMsg)
|
||||
val newRequested =
|
||||
if (s.currentSeqNr == s.requestedSeqNr)
|
||||
false
|
||||
else {
|
||||
s.producer ! RequestNext(producerId, s.currentSeqNr + 1, s.confirmedSeqNr, msgAdapter, context.self)
|
||||
true
|
||||
}
|
||||
active(
|
||||
s.copy(
|
||||
requested = newRequested,
|
||||
currentSeqNr = s.currentSeqNr + 1,
|
||||
replyAfterStore = newReplyAfterStore,
|
||||
unconfirmed = newUnconfirmed))
|
||||
}
|
||||
|
||||
def checkOnMsgRequestedState(): Unit = {
|
||||
if (!s.requested || s.currentSeqNr > s.requestedSeqNr) {
|
||||
throw new IllegalStateException(
|
||||
s"Unexpected Msg when no demand, requested ${s.requested}, " +
|
||||
s"requestedSeqNr ${s.requestedSeqNr}, currentSeqNr ${s.currentSeqNr}")
|
||||
}
|
||||
}
|
||||
|
||||
def receiveRequest(
|
||||
newConfirmedSeqNr: SeqNr,
|
||||
newRequestedSeqNr: SeqNr,
|
||||
supportResend: Boolean,
|
||||
viaTimeout: Boolean): Behavior[InternalCommand] = {
|
||||
context.log.debugN(
|
||||
"Received Request, confirmed [{}], requested [{}], current [{}]",
|
||||
newConfirmedSeqNr,
|
||||
newRequestedSeqNr,
|
||||
s.currentSeqNr)
|
||||
|
||||
val stateAfterAck = onAck(newConfirmedSeqNr)
|
||||
|
||||
val newUnconfirmed =
|
||||
if (supportResend) stateAfterAck.unconfirmed
|
||||
else Vector.empty
|
||||
|
||||
if ((viaTimeout || newConfirmedSeqNr == s.firstSeqNr) && supportResend) {
// the last message was lost and no more messages were sent that would trigger a Resend
|
||||
resendUnconfirmed(newUnconfirmed)
|
||||
}
|
||||
|
||||
// when supportResend=false the requestedSeqNr window must be expanded if all sent messages were lost
|
||||
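// Illustrative numbers: if seqNr 5..10 were sent and all lost, a Request(confirmedSeqNr = 4,
// requestUpToSeqNr = 10) may arrive while currentSeqNr is already 11; the window is then pushed
// to 11 + (10 - 4) = 17 so the producer is not blocked waiting for confirmations that will never come.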
val newRequestedSeqNr2 =
|
||||
if (!supportResend && newRequestedSeqNr <= stateAfterAck.currentSeqNr)
|
||||
stateAfterAck.currentSeqNr + (newRequestedSeqNr - newConfirmedSeqNr)
|
||||
else
|
||||
newRequestedSeqNr
|
||||
if (newRequestedSeqNr2 != newRequestedSeqNr)
|
||||
context.log.debugN(
|
||||
"Expanded requestedSeqNr from [{}] to [{}], because current [{}] and all were probably lost",
|
||||
newRequestedSeqNr,
|
||||
newRequestedSeqNr2,
|
||||
stateAfterAck.currentSeqNr)
|
||||
|
||||
if (newRequestedSeqNr2 > s.requestedSeqNr) {
|
||||
if (!s.requested && (newRequestedSeqNr2 - s.currentSeqNr) > 0)
|
||||
s.producer ! RequestNext(producerId, s.currentSeqNr, newConfirmedSeqNr, msgAdapter, context.self)
|
||||
active(
|
||||
stateAfterAck.copy(
|
||||
requested = true,
|
||||
requestedSeqNr = newRequestedSeqNr2,
|
||||
supportResend = supportResend,
|
||||
unconfirmed = newUnconfirmed))
|
||||
} else {
|
||||
active(stateAfterAck.copy(supportResend = supportResend, unconfirmed = newUnconfirmed))
|
||||
}
|
||||
}
|
||||
|
||||
def receiveAck(newConfirmedSeqNr: SeqNr): Behavior[InternalCommand] = {
|
||||
context.log.trace2("Received Ack, confirmed [{}], current [{}].", newConfirmedSeqNr, s.currentSeqNr)
|
||||
val stateAfterAck = onAck(newConfirmedSeqNr)
|
||||
if (newConfirmedSeqNr == s.firstSeqNr && stateAfterAck.unconfirmed.nonEmpty) {
|
||||
resendUnconfirmed(stateAfterAck.unconfirmed)
|
||||
}
|
||||
active(stateAfterAck)
|
||||
}
|
||||
|
||||
def onAck(newConfirmedSeqNr: SeqNr): State[A] = {
|
||||
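// Complete pending MessageWithConfirmation replies whose seqNr is covered by this ack; the rest
// stay in replyAfterStore until a later Request or Ack confirms them.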
val (replies, newReplyAfterStore) = s.replyAfterStore.partition { case (seqNr, _) => seqNr <= newConfirmedSeqNr }
|
||||
if (replies.nonEmpty)
|
||||
context.log.trace("Sending confirmation replies from [{}] to [{}].", replies.head._1, replies.last._1)
|
||||
replies.foreach {
|
||||
case (seqNr, replyTo) => replyTo ! seqNr
|
||||
}
|
||||
|
||||
val newUnconfirmed =
|
||||
if (s.supportResend) s.unconfirmed.dropWhile(_.seqNr <= newConfirmedSeqNr)
|
||||
else Vector.empty
|
||||
|
||||
if (newConfirmedSeqNr == s.firstSeqNr)
|
||||
timers.cancel(ResendFirst)
|
||||
|
||||
val newMaxConfirmedSeqNr = math.max(s.confirmedSeqNr, newConfirmedSeqNr)
|
||||
|
||||
durableQueue.foreach { d =>
|
||||
// Storing the confirmedSeqNr can be "write behind", at-least-once delivery
|
||||
// TODO #28721 to reduce number of writes, consider to only StoreMessageConfirmed for the Request messages and not for each Ack
|
||||
if (newMaxConfirmedSeqNr != s.confirmedSeqNr)
|
||||
d ! StoreMessageConfirmed(newMaxConfirmedSeqNr, NoQualifier, System.currentTimeMillis())
|
||||
}
|
||||
|
||||
s.copy(confirmedSeqNr = newMaxConfirmedSeqNr, replyAfterStore = newReplyAfterStore, unconfirmed = newUnconfirmed)
|
||||
}
|
||||
|
||||
def receiveStoreMessageSentCompleted(seqNr: SeqNr, m: A, ack: Boolean) = {
|
||||
if (seqNr != s.currentSeqNr)
|
||||
throw new IllegalStateException(s"currentSeqNr [${s.currentSeqNr}] not matching stored seqNr [$seqNr]")
|
||||
|
||||
s.replyAfterStore.get(seqNr).foreach { replyTo =>
|
||||
context.log.trace("Sending confirmation reply to [{}] after storage.", seqNr)
|
||||
replyTo ! seqNr
|
||||
}
|
||||
val newReplyAfterStore = s.replyAfterStore - seqNr
|
||||
|
||||
onMsg(m, newReplyAfterStore, ack)
|
||||
}
|
||||
|
||||
def receiveResend(fromSeqNr: SeqNr): Behavior[InternalCommand] = {
|
||||
val newUnconfirmed =
|
||||
if (fromSeqNr == 0 && s.unconfirmed.nonEmpty)
|
||||
s.unconfirmed.head.asFirst +: s.unconfirmed.tail
|
||||
else
|
||||
s.unconfirmed.dropWhile(_.seqNr < fromSeqNr)
|
||||
resendUnconfirmed(newUnconfirmed)
|
||||
active(s.copy(unconfirmed = newUnconfirmed))
|
||||
}
|
||||
|
||||
def resendUnconfirmed(newUnconfirmed: Vector[SequencedMessage[A]]): Unit = {
|
||||
if (newUnconfirmed.nonEmpty)
|
||||
context.log.debug("Resending [{} - {}].", newUnconfirmed.head.seqNr, newUnconfirmed.last.seqNr)
|
||||
newUnconfirmed.foreach(s.send)
|
||||
}
|
||||
|
||||
def receiveResendFirstUnconfirmed(): Behavior[InternalCommand] = {
|
||||
if (s.unconfirmed.nonEmpty) {
|
||||
context.log.debug("Resending first unconfirmed [{}].", s.unconfirmed.head.seqNr)
|
||||
s.send(s.unconfirmed.head)
|
||||
}
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
def receiveResendFirst(): Behavior[InternalCommand] = {
|
||||
if (s.unconfirmed.nonEmpty && s.unconfirmed.head.seqNr == s.firstSeqNr) {
|
||||
context.log.debug("Resending first, [{}].", s.firstSeqNr)
|
||||
s.send(s.unconfirmed.head.asFirst)
|
||||
} else {
|
||||
if (s.currentSeqNr > s.firstSeqNr)
|
||||
timers.cancel(ResendFirst)
|
||||
}
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
def receiveStart(start: Start[A]): Behavior[InternalCommand] = {
|
||||
ProducerControllerImpl.enforceLocalProducer(start.producer)
|
||||
context.log.debug("Register new Producer [{}], currentSeqNr [{}].", start.producer, s.currentSeqNr)
|
||||
if (s.requested)
|
||||
start.producer ! RequestNext(producerId, s.currentSeqNr, s.confirmedSeqNr, msgAdapter, context.self)
|
||||
active(s.copy(producer = start.producer))
|
||||
}
|
||||
|
||||
def receiveRegisterConsumer(
|
||||
consumerController: ActorRef[ConsumerController.Command[A]]): Behavior[InternalCommand] = {
|
||||
val newFirstSeqNr =
|
||||
if (s.unconfirmed.isEmpty) s.currentSeqNr
|
||||
else s.unconfirmed.head.seqNr
|
||||
context.log.debug(
|
||||
"Register new ConsumerController [{}], starting with seqNr [{}].",
|
||||
consumerController,
|
||||
newFirstSeqNr)
|
||||
if (s.unconfirmed.nonEmpty) {
|
||||
timers.startTimerWithFixedDelay(ResendFirst, ResendFirst, 1.second)
|
||||
context.self ! ResendFirst
|
||||
}
|
||||
// update the send function
val newSend: SequencedMessage[A] => Unit = consumerController ! _
|
||||
active(s.copy(firstSeqNr = newFirstSeqNr, send = newSend))
|
||||
}
|
||||
|
||||
Behaviors.receiveMessage {
|
||||
case MessageWithConfirmation(m: A, replyTo) =>
|
||||
val newReplyAfterStore = s.replyAfterStore.updated(s.currentSeqNr, replyTo)
|
||||
if (durableQueue.isEmpty) {
|
||||
onMsg(m, newReplyAfterStore, ack = true)
|
||||
} else {
|
||||
storeMessageSent(
|
||||
MessageSent(s.currentSeqNr, m, ack = true, NoQualifier, System.currentTimeMillis()),
|
||||
attempt = 1)
|
||||
active(s.copy(replyAfterStore = newReplyAfterStore))
|
||||
}
|
||||
|
||||
case Msg(m: A) =>
|
||||
if (durableQueue.isEmpty) {
|
||||
onMsg(m, s.replyAfterStore, ack = false)
|
||||
} else {
|
||||
storeMessageSent(
|
||||
MessageSent(s.currentSeqNr, m, ack = false, NoQualifier, System.currentTimeMillis()),
|
||||
attempt = 1)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
case StoreMessageSentCompleted(MessageSent(seqNr, m: A, ack, NoQualifier, _)) =>
|
||||
receiveStoreMessageSentCompleted(seqNr, m, ack)
|
||||
|
||||
case f: StoreMessageSentFailed[A] =>
|
||||
receiveStoreMessageSentFailed(f)
|
||||
|
||||
case Request(newConfirmedSeqNr, newRequestedSeqNr, supportResend, viaTimeout) =>
|
||||
receiveRequest(newConfirmedSeqNr, newRequestedSeqNr, supportResend, viaTimeout)
|
||||
|
||||
case Ack(newConfirmedSeqNr) =>
|
||||
receiveAck(newConfirmedSeqNr)
|
||||
|
||||
case Resend(fromSeqNr) =>
|
||||
receiveResend(fromSeqNr)
|
||||
|
||||
case ResendFirst =>
|
||||
receiveResendFirst()
|
||||
|
||||
case ResendFirstUnconfirmed =>
|
||||
receiveResendFirstUnconfirmed()
|
||||
|
||||
case start: Start[A] =>
|
||||
receiveStart(start)
|
||||
|
||||
case RegisterConsumer(consumerController: ActorRef[ConsumerController.Command[A]] @unchecked) =>
|
||||
receiveRegisterConsumer(consumerController)
|
||||
|
||||
case DurableQueueTerminated =>
|
||||
throw new IllegalStateException("DurableQueue was unexpectedly terminated.")
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveStoreMessageSentFailed(f: StoreMessageSentFailed[A]): Behavior[InternalCommand] = {
|
||||
if (f.attempt >= settings.durableQueueRetryAttempts) {
|
||||
val errorMessage =
|
||||
s"StoreMessageSentFailed seqNr [${f.messageSent.seqNr}] failed after [${f.attempt}] attempts, giving up."
|
||||
context.log.error(errorMessage)
|
||||
throw new TimeoutException(errorMessage)
|
||||
} else {
|
||||
context.log.warnN(
|
||||
"StoreMessageSent seqNr [{}] failed, attempt [{}] of [{}], retrying.",
|
||||
f.messageSent.seqNr,
|
||||
f.attempt,
|
||||
settings.durableQueueRetryAttempts)
|
||||
// retry
|
||||
storeMessageSent(f.messageSent, attempt = f.attempt + 1)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def storeMessageSent(messageSent: MessageSent[A], attempt: Int): Unit = {
|
||||
context.ask[StoreMessageSent[A], StoreMessageSentAck](
|
||||
durableQueue.get,
|
||||
askReplyTo => StoreMessageSent(messageSent, askReplyTo)) {
|
||||
case Success(_) => StoreMessageSentCompleted(messageSent)
|
||||
case Failure(_) => StoreMessageSentFailed(messageSent, attempt) // timeout
|
||||
}
|
||||
}
|
||||
}
@@ -0,0 +1,674 @@
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.delivery.internal
|
||||
|
||||
import java.util.UUID
|
||||
import java.util.concurrent.ThreadLocalRandom
|
||||
import java.util.concurrent.TimeoutException
|
||||
|
||||
import scala.reflect.ClassTag
|
||||
import scala.util.Failure
|
||||
import scala.util.Success
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.DurableProducerQueue
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.ConfirmationQualifier
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.SeqNr
|
||||
import akka.actor.typed.delivery.ProducerController
|
||||
import akka.actor.typed.delivery.WorkPullingProducerController
|
||||
import akka.actor.typed.receptionist.Receptionist
|
||||
import akka.actor.typed.receptionist.ServiceKey
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
import akka.actor.typed.scaladsl.StashBuffer
|
||||
import akka.annotation.InternalApi
|
||||
import akka.util.Timeout
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] object WorkPullingProducerControllerImpl {
|
||||
|
||||
import WorkPullingProducerController.Command
|
||||
import WorkPullingProducerController.RequestNext
|
||||
import WorkPullingProducerController.Start
|
||||
|
||||
sealed trait InternalCommand
|
||||
|
||||
/** For commands defined in public WorkPullingProducerController */
|
||||
trait UnsealedInternalCommand extends InternalCommand
|
||||
|
||||
private type TotalSeqNr = Long
|
||||
private type OutSeqNr = Long
|
||||
private type OutKey = String
|
||||
|
||||
private final case class WorkerRequestNext[A](next: ProducerController.RequestNext[A]) extends InternalCommand
|
||||
|
||||
private final case class Ack(outKey: OutKey, confirmedSeqNr: OutSeqNr) extends InternalCommand
|
||||
private final case class AskTimeout(outKey: OutKey, outSeqNr: OutSeqNr) extends InternalCommand
|
||||
|
||||
private case object RegisterConsumerDone extends InternalCommand
|
||||
|
||||
private case class LoadStateReply[A](state: DurableProducerQueue.State[A]) extends InternalCommand
|
||||
private case class LoadStateFailed(attempt: Int) extends InternalCommand
|
||||
private case class StoreMessageSentReply(ack: DurableProducerQueue.StoreMessageSentAck)
|
||||
private case class StoreMessageSentFailed[A](messageSent: DurableProducerQueue.MessageSent[A], attempt: Int)
|
||||
extends InternalCommand
|
||||
private case class StoreMessageSentCompleted[A](messageSent: DurableProducerQueue.MessageSent[A])
|
||||
extends InternalCommand
|
||||
private case object DurableQueueTerminated extends InternalCommand
|
||||
|
||||
private final case class OutState[A](
|
||||
producerController: ActorRef[ProducerController.Command[A]],
|
||||
consumerController: ActorRef[ConsumerController.Command[A]],
|
||||
seqNr: OutSeqNr,
|
||||
unconfirmed: Vector[Unconfirmed[A]],
|
||||
askNextTo: Option[ActorRef[ProducerController.MessageWithConfirmation[A]]]) {
|
||||
def confirmationQualifier: ConfirmationQualifier = producerController.path.name
|
||||
}
|
||||
|
||||
private final case class Unconfirmed[A](
|
||||
totalSeqNr: TotalSeqNr,
|
||||
outSeqNr: OutSeqNr,
|
||||
msg: A,
|
||||
replyTo: Option[ActorRef[Done]])
|
||||
|
||||
private final case class State[A](
|
||||
currentSeqNr: TotalSeqNr, // only updated when durableQueue is enabled
|
||||
workers: Set[ActorRef[ConsumerController.Command[A]]],
|
||||
out: Map[OutKey, OutState[A]],
|
||||
// when durableQueue is enabled the worker must be selected before storage
// to know the confirmationQualifier up-front
|
||||
preselectedWorkers: Map[TotalSeqNr, PreselectedWorker],
|
||||
// replyAfterStore is used when durableQueue is enabled, otherwise replies are tracked in OutState
|
||||
replyAfterStore: Map[TotalSeqNr, ActorRef[Done]],
|
||||
// when the worker is deregistered but there are still unconfirmed messages
|
||||
handOver: Map[TotalSeqNr, HandOver],
|
||||
producer: ActorRef[WorkPullingProducerController.RequestNext[A]],
|
||||
requested: Boolean)
|
||||
|
||||
private case class PreselectedWorker(outKey: OutKey, confirmationQualifier: ConfirmationQualifier)
|
||||
|
||||
private case class HandOver(oldConfirmationQualifier: ConfirmationQualifier, oldSeqNr: TotalSeqNr)
|
||||
|
||||
// registration of workers via Receptionist
|
||||
private final case class CurrentWorkers[A](workers: Set[ActorRef[ConsumerController.Command[A]]])
|
||||
extends InternalCommand
|
||||
|
||||
private final case class Msg[A](msg: A, wasStashed: Boolean, replyTo: Option[ActorRef[Done]]) extends InternalCommand
|
||||
|
||||
private final case class ResendDurableMsg[A](
|
||||
msg: A,
|
||||
oldConfirmationQualifier: ConfirmationQualifier,
|
||||
oldSeqNr: TotalSeqNr)
|
||||
extends InternalCommand
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
workerServiceKey: ServiceKey[ConsumerController.Command[A]],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: WorkPullingProducerController.Settings): Behavior[Command[A]] = {
|
||||
Behaviors
|
||||
.withStash[InternalCommand](settings.bufferSize) { stashBuffer =>
|
||||
Behaviors.setup[InternalCommand] { context =>
|
||||
Behaviors.withMdc(staticMdc = Map("producerId" -> producerId)) {
|
||||
context.setLoggerName("akka.actor.typed.delivery.WorkPullingProducerController")
|
||||
val listingAdapter = context.messageAdapter[Receptionist.Listing](listing =>
|
||||
CurrentWorkers[A](listing.allServiceInstances(workerServiceKey)))
|
||||
context.system.receptionist ! Receptionist.Subscribe(workerServiceKey, listingAdapter)
|
||||
|
||||
val durableQueue = askLoadState(context, durableQueueBehavior, settings)
|
||||
|
||||
waitingForStart(
|
||||
producerId,
|
||||
context,
|
||||
stashBuffer,
|
||||
durableQueue,
|
||||
settings,
|
||||
None,
|
||||
createInitialState(durableQueue.nonEmpty))
|
||||
}
|
||||
}
|
||||
}
|
||||
.narrow
|
||||
}
|
||||
|
||||
private def createInitialState[A: ClassTag](hasDurableQueue: Boolean) = {
|
||||
if (hasDurableQueue) None else Some(DurableProducerQueue.State.empty[A])
|
||||
}
|
||||
|
||||
private def waitingForStart[A: ClassTag](
|
||||
producerId: String,
|
||||
context: ActorContext[InternalCommand],
|
||||
stashBuffer: StashBuffer[InternalCommand],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: WorkPullingProducerController.Settings,
|
||||
producer: Option[ActorRef[RequestNext[A]]],
|
||||
initialState: Option[DurableProducerQueue.State[A]]): Behavior[InternalCommand] = {
|
||||
|
||||
def becomeActive(p: ActorRef[RequestNext[A]], s: DurableProducerQueue.State[A]): Behavior[InternalCommand] = {
|
||||
// resend unconfirmed to self, order doesn't matter for work pulling
|
||||
s.unconfirmed.foreach {
|
||||
case DurableProducerQueue.MessageSent(oldSeqNr, msg, _, oldConfirmationQualifier, _) =>
|
||||
context.self ! ResendDurableMsg(msg, oldConfirmationQualifier, oldSeqNr)
|
||||
}
|
||||
|
||||
val msgAdapter: ActorRef[A] = context.messageAdapter(msg => Msg(msg, wasStashed = false, replyTo = None))
|
||||
val requestNext = RequestNext[A](msgAdapter, context.self)
|
||||
val b =
|
||||
new WorkPullingProducerControllerImpl(context, stashBuffer, producerId, requestNext, durableQueue, settings)
|
||||
.active(createInitialState(s.currentSeqNr, p))
|
||||
stashBuffer.unstashAll(b)
|
||||
}
|
||||
|
||||
Behaviors.receiveMessage {
|
||||
case start: Start[A] @unchecked =>
|
||||
ProducerControllerImpl.enforceLocalProducer(start.producer)
|
||||
initialState match {
|
||||
case Some(s) =>
|
||||
becomeActive(start.producer, s)
|
||||
case None =>
|
||||
// waiting for LoadStateReply
|
||||
waitingForStart(
|
||||
producerId,
|
||||
context,
|
||||
stashBuffer,
|
||||
durableQueue,
|
||||
settings,
|
||||
Some(start.producer),
|
||||
initialState)
|
||||
}
|
||||
|
||||
case load: LoadStateReply[A] @unchecked =>
|
||||
producer match {
|
||||
case Some(p) =>
|
||||
becomeActive(p, load.state)
|
||||
case None =>
|
||||
// waiting for LoadStateReply
|
||||
waitingForStart(producerId, context, stashBuffer, durableQueue, settings, producer, Some(load.state))
|
||||
}
|
||||
|
||||
case LoadStateFailed(attempt) =>
|
||||
if (attempt >= settings.producerControllerSettings.durableQueueRetryAttempts) {
|
||||
val errorMessage = s"LoadState failed after [$attempt] attempts, giving up."
|
||||
context.log.error(errorMessage)
|
||||
throw new TimeoutException(errorMessage)
|
||||
} else {
|
||||
context.log.warn(
|
||||
"LoadState failed, attempt [{}] of [{}], retrying.",
|
||||
attempt,
|
||||
settings.producerControllerSettings.durableQueueRetryAttempts)
|
||||
// retry
|
||||
askLoadState(context, durableQueue, settings, attempt + 1)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
case DurableQueueTerminated =>
|
||||
throw new IllegalStateException("DurableQueue was unexpectedly terminated.")
|
||||
|
||||
case other =>
|
||||
checkStashFull(stashBuffer)
|
||||
stashBuffer.stash(other)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def checkStashFull[A: ClassTag](stashBuffer: StashBuffer[InternalCommand]): Unit = {
|
||||
if (stashBuffer.isFull)
|
||||
throw new IllegalArgumentException(s"Buffer is full, size [${stashBuffer.size}].")
|
||||
}
|
||||
|
||||
private def askLoadState[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: WorkPullingProducerController.Settings): Option[ActorRef[DurableProducerQueue.Command[A]]] = {
|
||||
|
||||
durableQueueBehavior.map { b =>
|
||||
val ref = context.spawn(b, "durable")
|
||||
context.watchWith(ref, DurableQueueTerminated)
|
||||
askLoadState(context, Some(ref), settings, attempt = 1)
|
||||
ref
|
||||
}
|
||||
}
|
||||
|
||||
private def askLoadState[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: WorkPullingProducerController.Settings,
|
||||
attempt: Int): Unit = {
|
||||
implicit val loadTimeout: Timeout = settings.producerControllerSettings.durableQueueRequestTimeout
|
||||
durableQueue.foreach { ref =>
|
||||
context.ask[DurableProducerQueue.LoadState[A], DurableProducerQueue.State[A]](
|
||||
ref,
|
||||
askReplyTo => DurableProducerQueue.LoadState[A](askReplyTo)) {
|
||||
case Success(s) => LoadStateReply(s)
|
||||
case Failure(_) => LoadStateFailed(attempt) // timeout
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def createInitialState[A](
|
||||
currentSeqNr: SeqNr,
|
||||
producer: ActorRef[WorkPullingProducerController.RequestNext[A]]): State[A] =
|
||||
State(currentSeqNr, Set.empty, Map.empty, Map.empty, Map.empty, Map.empty, producer, requested = false)
|
||||
|
||||
}
|
||||
|
||||
private class WorkPullingProducerControllerImpl[A: ClassTag](
|
||||
context: ActorContext[WorkPullingProducerControllerImpl.InternalCommand],
|
||||
stashBuffer: StashBuffer[WorkPullingProducerControllerImpl.InternalCommand],
|
||||
producerId: String,
|
||||
requestNext: WorkPullingProducerController.RequestNext[A],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: WorkPullingProducerController.Settings) {
|
||||
import DurableProducerQueue.MessageSent
|
||||
import DurableProducerQueue.StoreMessageConfirmed
|
||||
import DurableProducerQueue.StoreMessageSent
|
||||
import DurableProducerQueue.StoreMessageSentAck
|
||||
import WorkPullingProducerController.GetWorkerStats
|
||||
import WorkPullingProducerController.MessageWithConfirmation
|
||||
import WorkPullingProducerController.Start
|
||||
import WorkPullingProducerController.WorkerStats
|
||||
import WorkPullingProducerControllerImpl._
|
||||
|
||||
private val durableQueueAskTimeout: Timeout = settings.producerControllerSettings.durableQueueRequestTimeout
|
||||
private val workerAskTimeout: Timeout = settings.internalAskTimeout
|
||||
|
||||
private val workerRequestNextAdapter: ActorRef[ProducerController.RequestNext[A]] =
|
||||
context.messageAdapter(WorkerRequestNext.apply)
|
||||
|
||||
private def active(s: State[A]): Behavior[InternalCommand] = {
|
||||
|
||||
def onMessage(msg: A, wasStashed: Boolean, replyTo: Option[ActorRef[Done]], totalSeqNr: TotalSeqNr): State[A] = {
|
||||
val consumersWithDemand = s.out.iterator.filter { case (_, out) => out.askNextTo.isDefined }.toVector
|
||||
if (context.log.isTraceEnabled)
|
||||
context.log.traceN(
|
||||
"Received message seqNr [{}], wasStashed [{}], consumersWithDemand [{}], hasRequested [{}].",
|
||||
totalSeqNr,
|
||||
wasStashed,
|
||||
consumersWithDemand.map(_._1).mkString(", "),
|
||||
s.requested)
|
||||
if (!s.requested && !wasStashed && durableQueue.isEmpty)
|
||||
throw new IllegalStateException(s"Unexpected message [$msg], wasn't requested nor unstashed.")
|
||||
|
||||
val selectedWorker =
|
||||
if (durableQueue.isDefined) {
|
||||
s.preselectedWorkers.get(totalSeqNr) match {
|
||||
case Some(PreselectedWorker(outKey, confirmationQualifier)) =>
|
||||
s.out.get(outKey) match {
|
||||
case Some(out) => Right(outKey -> out)
|
||||
case None =>
|
||||
// the preselected worker was deregistered in the meantime
|
||||
context.self ! ResendDurableMsg(msg, confirmationQualifier, totalSeqNr)
|
||||
Left(s)
|
||||
}
|
||||
case None =>
|
||||
throw new IllegalStateException(s"Expected preselected worker for seqNr [$totalSeqNr].")
|
||||
}
|
||||
} else {
|
||||
selectWorker() match {
|
||||
case Some(w) => Right(w)
|
||||
case None =>
|
||||
checkStashFull(stashBuffer)
|
||||
context.log.debug("Stashing message, seqNr [{}]", totalSeqNr)
|
||||
stashBuffer.stash(Msg(msg, wasStashed = true, replyTo))
|
||||
val newRequested = if (wasStashed) s.requested else false
|
||||
Left(s.copy(requested = newRequested))
|
||||
}
|
||||
}
|
||||
|
||||
selectedWorker match {
|
||||
case Right((outKey, out)) =>
|
||||
val newUnconfirmed = out.unconfirmed :+ Unconfirmed(totalSeqNr, out.seqNr, msg, replyTo)
|
||||
val newOut = s.out.updated(outKey, out.copy(unconfirmed = newUnconfirmed, askNextTo = None))
|
||||
implicit val askTimeout: Timeout = workerAskTimeout
|
||||
context.ask[ProducerController.MessageWithConfirmation[A], OutSeqNr](
|
||||
out.askNextTo.get,
|
||||
ProducerController.MessageWithConfirmation(msg, _)) {
|
||||
case Success(seqNr) => Ack(outKey, seqNr)
|
||||
case Failure(_) => AskTimeout(outKey, out.seqNr)
|
||||
}
|
||||
|
||||
def tellRequestNext(): Unit = {
|
||||
context.log.trace("Sending RequestNext to producer, seqNr [{}].", totalSeqNr)
|
||||
s.producer ! requestNext
|
||||
}
|
||||
|
||||
val hasMoreDemand = consumersWithDemand.size >= 2
|
||||
// decision table based on s.requested, wasStashed, hasMoreDemand, stashBuffer.isEmpty
|
||||
val newRequested =
|
||||
if (s.requested && !wasStashed && hasMoreDemand) {
|
||||
// request immediately since more demand
|
||||
tellRequestNext()
|
||||
true
|
||||
} else if (s.requested && !wasStashed && !hasMoreDemand) {
|
||||
// wait until more demand
|
||||
false
|
||||
} else if (!s.requested && wasStashed && hasMoreDemand && stashBuffer.isEmpty) {
|
||||
// msg was unstashed, the last from stash
|
||||
tellRequestNext()
|
||||
true
|
||||
} else if (!s.requested && wasStashed && hasMoreDemand && stashBuffer.nonEmpty) {
|
||||
// more in stash
|
||||
false
|
||||
} else if (!s.requested && wasStashed && !hasMoreDemand) {
|
||||
// wait until more demand
|
||||
false
|
||||
} else if (s.requested && wasStashed) {
|
||||
// msg was unstashed, but pending request already in progress
|
||||
true
|
||||
} else if (durableQueue.isDefined && !s.requested && !wasStashed) {
|
||||
// msg from ResendDurableMsg, or stashed before storage
|
||||
false
|
||||
} else {
|
||||
throw new IllegalStateException(s"Invalid combination of hasRequested [${s.requested}], " +
|
||||
s"wasStashed [$wasStashed], hasMoreDemand [$hasMoreDemand], stashBuffer.isEmpty [${stashBuffer.isEmpty}]")
|
||||
}
|
||||
|
||||
s.copy(out = newOut, requested = newRequested, preselectedWorkers = s.preselectedWorkers - totalSeqNr)
|
||||
|
||||
case Left(newState) =>
|
||||
newState
|
||||
}
|
||||
}
|
||||
|
||||
def workersWithDemand: Vector[(OutKey, OutState[A])] =
|
||||
s.out.iterator.filter { case (_, out) => out.askNextTo.isDefined }.toVector
|
||||
|
||||
def selectWorker(): Option[(OutKey, OutState[A])] = {
|
||||
val preselected = s.preselectedWorkers.valuesIterator.map(_.outKey).toSet
|
||||
val workers = workersWithDemand.filterNot {
|
||||
case (outKey, _) => preselected(outKey)
|
||||
}
|
||||
if (workers.isEmpty) {
|
||||
None
|
||||
} else {
|
||||
val i = ThreadLocalRandom.current().nextInt(workers.size)
|
||||
Some(workers(i))
|
||||
}
|
||||
}
|
||||
|
||||
def onMessageBeforeDurableQueue(msg: A, replyTo: Option[ActorRef[Done]]): Behavior[InternalCommand] = {
|
||||
selectWorker() match {
|
||||
case Some((outKey, out)) =>
|
||||
storeMessageSent(
|
||||
MessageSent(
|
||||
s.currentSeqNr,
|
||||
msg,
|
||||
ack = replyTo.isDefined,
|
||||
out.confirmationQualifier,
|
||||
System.currentTimeMillis()),
|
||||
attempt = 1)
|
||||
val newReplyAfterStore = replyTo match {
|
||||
case None => s.replyAfterStore
|
||||
case Some(r) => s.replyAfterStore.updated(s.currentSeqNr, r)
|
||||
}
|
||||
active(
|
||||
s.copy(
|
||||
currentSeqNr = s.currentSeqNr + 1,
|
||||
preselectedWorkers =
|
||||
s.preselectedWorkers.updated(s.currentSeqNr, PreselectedWorker(outKey, out.confirmationQualifier)),
|
||||
replyAfterStore = newReplyAfterStore))
|
||||
case None =>
|
||||
checkStashFull(stashBuffer)
|
||||
// no demand from any workers, or all already preselected
|
||||
context.log.debug("Stash before storage, seqNr [{}]", s.currentSeqNr)
|
||||
// not stored yet, so don't treat it as stashed
|
||||
stashBuffer.stash(Msg(msg, wasStashed = false, replyTo))
|
||||
active(s)
|
||||
}
|
||||
}
|
||||
|
||||
def onResendDurableMsg(resend: ResendDurableMsg[A]): Behavior[InternalCommand] = {
|
||||
require(durableQueue.isDefined, "Unexpected ResendDurableMsg when DurableQueue not defined.")
|
||||
selectWorker() match {
|
||||
case Some((outKey, out)) =>
|
||||
storeMessageSent(
|
||||
MessageSent(s.currentSeqNr, resend.msg, false, out.confirmationQualifier, System.currentTimeMillis()),
|
||||
attempt = 1)
|
||||
// when StoreMessageSentCompleted arrives, the confirmation for (oldConfirmationQualifier, oldSeqNr) will be stored
|
||||
active(
|
||||
s.copy(
|
||||
currentSeqNr = s.currentSeqNr + 1,
|
||||
preselectedWorkers =
|
||||
s.preselectedWorkers.updated(s.currentSeqNr, PreselectedWorker(outKey, out.confirmationQualifier)),
|
||||
handOver = s.handOver.updated(s.currentSeqNr, HandOver(resend.oldConfirmationQualifier, resend.oldSeqNr))))
|
||||
case None =>
|
||||
checkStashFull(stashBuffer)
|
||||
// no demand from any workers, or all already preselected
|
||||
context.log.debug("Stash before storage of resent durable message, seqNr [{}].", s.currentSeqNr)
|
||||
// not stored yet, so don't treat it as stashed
|
||||
stashBuffer.stash(resend)
|
||||
active(s)
|
||||
}
|
||||
}
|
||||
|
||||
def receiveStoreMessageSentCompleted(seqNr: SeqNr, m: A) = {
|
||||
s.replyAfterStore.get(seqNr).foreach { replyTo =>
|
||||
context.log.trace("Sending reply for seqNr [{}] after storage.", seqNr)
|
||||
replyTo ! Done
|
||||
}
|
||||
|
||||
s.handOver.get(seqNr).foreach {
|
||||
case HandOver(oldConfirmationQualifier, oldSeqNr) =>
|
||||
durableQueue.foreach { d =>
|
||||
d ! StoreMessageConfirmed(oldSeqNr, oldConfirmationQualifier, System.currentTimeMillis())
|
||||
}
|
||||
}
|
||||
|
||||
val newState = onMessage(m, wasStashed = false, replyTo = None, seqNr)
|
||||
active(newState.copy(replyAfterStore = newState.replyAfterStore - seqNr, handOver = newState.handOver - seqNr))
|
||||
}
|
||||
|
||||
def receiveAck(ack: Ack): Behavior[InternalCommand] = {
|
||||
s.out.get(ack.outKey) match {
|
||||
case Some(outState) =>
|
||||
val newUnconfirmed = onAck(outState, ack.confirmedSeqNr)
|
||||
active(s.copy(out = s.out.updated(ack.outKey, outState.copy(unconfirmed = newUnconfirmed))))
|
||||
case None =>
|
||||
// obsolete Ack, ConsumerController already deregistered
|
||||
Behaviors.unhandled
|
||||
}
|
||||
}
|
||||
|
||||
def onAck(outState: OutState[A], confirmedSeqNr: OutSeqNr): Vector[Unconfirmed[A]] = {
|
||||
val (confirmed, newUnconfirmed) = outState.unconfirmed.partition {
|
||||
case Unconfirmed(_, seqNr, _, _) => seqNr <= confirmedSeqNr
|
||||
}
|
||||
|
||||
if (confirmed.nonEmpty) {
|
||||
context.log.trace("Received Ack seqNr [{}] from worker [{}].", confirmedSeqNr, outState.confirmationQualifier)
|
||||
confirmed.foreach {
|
||||
case Unconfirmed(_, _, _, None) => // no reply
|
||||
case Unconfirmed(_, _, _, Some(replyTo)) =>
|
||||
replyTo ! Done
|
||||
}
|
||||
|
||||
durableQueue.foreach { d =>
|
||||
// Storing the confirmedSeqNr can be "write behind", at-least-once delivery
|
||||
d ! StoreMessageConfirmed(
|
||||
confirmed.last.totalSeqNr,
|
||||
outState.confirmationQualifier,
|
||||
System.currentTimeMillis())
|
||||
}
|
||||
}
|
||||
|
||||
newUnconfirmed
|
||||
}
|
||||
|
||||
def receiveWorkerRequestNext(w: WorkerRequestNext[A]): Behavior[InternalCommand] = {
|
||||
val next = w.next
|
||||
val outKey = next.producerId
|
||||
s.out.get(outKey) match {
|
||||
case Some(outState) =>
|
||||
val confirmedSeqNr = w.next.confirmedSeqNr
|
||||
context.log.trace2(
|
||||
"Received RequestNext from worker [{}], confirmedSeqNr [{}].",
|
||||
w.next.producerId,
|
||||
confirmedSeqNr)
|
||||
|
||||
val newUnconfirmed = onAck(outState, confirmedSeqNr)
|
||||
|
||||
val newOut =
|
||||
s.out.updated(
|
||||
outKey,
|
||||
outState
|
||||
.copy(seqNr = w.next.currentSeqNr, unconfirmed = newUnconfirmed, askNextTo = Some(next.askNextTo)))
|
||||
|
||||
if (stashBuffer.nonEmpty) {
|
||||
context.log.debug2("Unstash [{}] after RequestNext from worker [{}]", stashBuffer.size, w.next.producerId)
|
||||
stashBuffer.unstashAll(active(s.copy(out = newOut)))
|
||||
} else if (s.requested) {
|
||||
active(s.copy(out = newOut))
|
||||
} else {
|
||||
context.log.trace("Sending RequestNext to producer after RequestNext from worker [{}].", w.next.producerId)
|
||||
s.producer ! requestNext
|
||||
active(s.copy(out = newOut, requested = true))
|
||||
}
|
||||
|
||||
case None =>
|
||||
// obsolete Next, ConsumerController already deregistered
|
||||
Behaviors.unhandled
|
||||
}
|
||||
}
|
||||
|
||||
def receiveCurrentWorkers(curr: CurrentWorkers[A]): Behavior[InternalCommand] = {
|
||||
// TODO #28722 we could also track unreachable workers and avoid them when selecting worker
|
||||
val addedWorkers = curr.workers.diff(s.workers)
|
||||
val removedWorkers = s.workers.diff(curr.workers)
|
||||
|
||||
val newState = addedWorkers.foldLeft(s) { (acc, c) =>
|
||||
val uuid = UUID.randomUUID().toString
|
||||
val outKey = s"$producerId-$uuid"
|
||||
context.log.debug2("Registered worker [{}], with producerId [{}].", c, outKey)
|
||||
val p = context.spawn(
|
||||
ProducerController[A](outKey, durableQueueBehavior = None, settings.producerControllerSettings),
|
||||
uuid)
|
||||
p ! ProducerController.Start(workerRequestNextAdapter)
|
||||
p ! ProducerController.RegisterConsumer(c)
|
||||
acc.copy(out = acc.out.updated(outKey, OutState(p, c, 0L, Vector.empty, None)))
|
||||
}
|
||||
|
||||
val newState2 = removedWorkers.foldLeft(newState) { (acc, c) =>
|
||||
acc.out.find { case (_, outState) => outState.consumerController == c } match {
|
||||
case Some((key, outState)) =>
|
||||
context.log.debug2("Deregistered worker [{}], with producerId [{}].", c, key)
|
||||
context.stop(outState.producerController)
|
||||
// resend the unconfirmed, sending to self since order of messages for WorkPulling doesn't matter anyway
|
||||
if (outState.unconfirmed.nonEmpty)
|
||||
context.log.debugN(
|
||||
"Resending unconfirmed from deregistered worker with producerId [{}], from seqNr [{}] to [{}].",
|
||||
key,
|
||||
outState.unconfirmed.head.outSeqNr,
|
||||
outState.unconfirmed.last.outSeqNr)
|
||||
outState.unconfirmed.foreach {
|
||||
case Unconfirmed(totalSeqNr, _, msg, replyTo) =>
|
||||
if (durableQueue.isEmpty)
|
||||
context.self ! Msg(msg, wasStashed = true, replyTo)
|
||||
else
|
||||
context.self ! ResendDurableMsg(msg, outState.confirmationQualifier, totalSeqNr)
|
||||
}
|
||||
acc.copy(out = acc.out - key)
|
||||
|
||||
case None =>
|
||||
context.log.debug("Deregistered non-existing worker [{}]", c)
|
||||
acc
|
||||
}
|
||||
}
|
||||
|
||||
active(newState2.copy(workers = curr.workers))
|
||||
}
|
||||
|
||||
def receiveStart(start: Start[A]): Behavior[InternalCommand] = {
|
||||
ProducerControllerImpl.enforceLocalProducer(start.producer)
|
||||
context.log.debug("Register new Producer [{}], currentSeqNr [{}].", start.producer, s.currentSeqNr)
|
||||
if (s.requested)
|
||||
start.producer ! requestNext
|
||||
active(s.copy(producer = start.producer))
|
||||
}
|
||||
|
||||
Behaviors.receiveMessage {
|
||||
case Msg(msg: A, wasStashed, replyTo) =>
|
||||
if (durableQueue.isEmpty || wasStashed)
|
||||
active(onMessage(msg, wasStashed, replyTo, s.currentSeqNr))
|
||||
else
|
||||
onMessageBeforeDurableQueue(msg, replyTo)
|
||||
|
||||
case MessageWithConfirmation(msg: A, replyTo) =>
|
||||
if (durableQueue.isEmpty)
|
||||
active(onMessage(msg, wasStashed = false, Some(replyTo), s.currentSeqNr))
|
||||
else
|
||||
onMessageBeforeDurableQueue(msg, Some(replyTo))
|
||||
|
||||
case m: ResendDurableMsg[A] =>
|
||||
onResendDurableMsg(m)
|
||||
|
||||
case StoreMessageSentCompleted(MessageSent(seqNr, m: A, _, _, _)) =>
|
||||
receiveStoreMessageSentCompleted(seqNr, m)
|
||||
|
||||
case f: StoreMessageSentFailed[A] =>
|
||||
receiveStoreMessageSentFailed(f)
|
||||
|
||||
case ack: Ack =>
|
||||
receiveAck(ack)
|
||||
|
||||
case w: WorkerRequestNext[A] =>
|
||||
receiveWorkerRequestNext(w)
|
||||
|
||||
case curr: CurrentWorkers[A] =>
|
||||
receiveCurrentWorkers(curr)
|
||||
|
||||
case GetWorkerStats(replyTo) =>
|
||||
replyTo ! WorkerStats(s.workers.size)
|
||||
Behaviors.same
|
||||
|
||||
case RegisterConsumerDone =>
|
||||
Behaviors.same
|
||||
|
||||
case start: Start[A] =>
|
||||
receiveStart(start)
|
||||
|
||||
case AskTimeout(outKey, outSeqNr) =>
|
||||
context.log.debug(
|
||||
"Message seqNr [{}] sent to worker [{}] timed out. It will be be redelivered.",
|
||||
outSeqNr,
|
||||
outKey)
|
||||
Behaviors.same
|
||||
|
||||
case DurableQueueTerminated =>
|
||||
throw new IllegalStateException("DurableQueue was unexpectedly terminated.")
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
private def receiveStoreMessageSentFailed(f: StoreMessageSentFailed[A]): Behavior[InternalCommand] = {
|
||||
if (f.attempt >= settings.producerControllerSettings.durableQueueRetryAttempts) {
|
||||
val errorMessage =
|
||||
s"StoreMessageSentFailed seqNr [${f.messageSent.seqNr}] failed after [${f.attempt}] attempts, giving up."
|
||||
context.log.error(errorMessage)
|
||||
throw new TimeoutException(errorMessage)
|
||||
} else {
|
||||
context.log.warn("StoreMessageSent seqNr [{}] failed, attempt [{}], retrying.", f.messageSent.seqNr, f.attempt)
|
||||
// retry
|
||||
storeMessageSent(f.messageSent, attempt = f.attempt + 1)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def storeMessageSent(messageSent: MessageSent[A], attempt: Int): Unit = {
|
||||
implicit val askTimout: Timeout = durableQueueAskTimeout
|
||||
context.ask[StoreMessageSent[A], StoreMessageSentAck](
|
||||
durableQueue.get,
|
||||
askReplyTo => StoreMessageSent(messageSent, askReplyTo)) {
|
||||
case Success(_) => StoreMessageSentCompleted(messageSent)
|
||||
case Failure(_) => StoreMessageSentFailed(messageSent, attempt) // timeout
|
||||
}
|
||||
}
|
||||
|
||||
}
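The `GetWorkerStats` query handled in `active()` above is answered with `WorkerStats`, so the number of currently registered workers can be observed from the outside. The following is a minimal sketch of such a query, not part of this change; the `Job` type, the `controller` reference and the 3 second timeout are assumptions made only for illustration.

import scala.concurrent.Future
import scala.concurrent.duration._

import akka.actor.typed.ActorRef
import akka.actor.typed.ActorSystem
import akka.actor.typed.Scheduler
import akka.actor.typed.delivery.WorkPullingProducerController
import akka.actor.typed.scaladsl.AskPattern._
import akka.util.Timeout

final case class Job(payload: String) // hypothetical message type

def queryWorkerStats(
    controller: ActorRef[WorkPullingProducerController.Command[Job]],
    system: ActorSystem[_]): Future[WorkPullingProducerController.WorkerStats] = {
  implicit val timeout: Timeout = 3.seconds
  implicit val scheduler: Scheduler = system.scheduler
  // GetWorkerStats is answered with WorkerStats(numberOfWorkers) by the controller, as shown in active() above
  controller.ask[WorkPullingProducerController.WorkerStats](replyTo =>
    WorkPullingProducerController.GetWorkerStats(replyTo))
}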
@@ -44,3 +44,32 @@ akka.actor {
}
|
||||
}
|
||||
|
||||
akka.reliable-delivery {
|
||||
sharding {
|
||||
producer-controller = ${akka.reliable-delivery.producer-controller}
|
||||
producer-controller {
|
||||
# Limit of how many messages can be buffered when there
|
||||
# is no demand from the consumer side.
|
||||
buffer-size = 1000
|
||||
|
||||
# Ask timeout for sending a message to the entity until receiving the confirmation (Ack)
|
||||
internal-ask-timeout = 60s
|
||||
|
||||
# If no messages are sent to an entity within this duration the
|
||||
# ProducerController for that entity will be removed.
|
||||
cleanup-unused-after = 120s
|
||||
|
||||
# In case ShardingConsumerController is stopped and there are pending
|
||||
# unconfirmed messages the ShardingProducerController has to "wake up"
|
||||
# the consumer again by resending the first unconfirmed message.
|
||||
resend-first-unconfirmed-idle-timeout = 10s
|
||||
}
|
||||
|
||||
consumer-controller = ${akka.reliable-delivery.consumer-controller}
|
||||
consumer-controller {
|
||||
# Limit of how many messages can be buffered before the
|
||||
# ShardingConsumerController is initialized by the Start message.
|
||||
buffer-size = 1000
|
||||
}
|
||||
}
|
||||
}
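As a sketch of how these settings surface in code (illustrative only, not part of the diff): the Settings factories added in this change read this configuration section and also allow programmatic overrides. The concrete values below are arbitrary examples.

import scala.concurrent.duration._

import akka.actor.typed.ActorSystem
import akka.cluster.sharding.typed.delivery.ShardingConsumerController
import akka.cluster.sharding.typed.delivery.ShardingProducerController

def tunedSettings(system: ActorSystem[_])
    : (ShardingProducerController.Settings, ShardingConsumerController.Settings) = {
  // starts from akka.reliable-delivery.sharding.producer-controller
  val producerSettings =
    ShardingProducerController.Settings(system).withBufferSize(2000).withCleanupUnusedAfter(60.seconds)
  // starts from akka.reliable-delivery.sharding.consumer-controller
  val consumerSettings =
    ShardingConsumerController.Settings(system).withBufferSize(2000)
  (producerSettings, consumerSettings)
}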
@@ -0,0 +1,139 @@
/*
|
||||
* Copyright (C) 2020-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding.typed.delivery
|
||||
|
||||
import java.util.function.{ Function => JFunction }
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.ActorSystem
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.cluster.sharding.typed.delivery.internal.ShardingConsumerControllerImpl
|
||||
import com.typesafe.config.Config
|
||||
|
||||
/**
|
||||
* `ShardingConsumerController` is used together with [[ShardingProducerController]]. See the description
|
||||
* in that class or the Akka reference documentation for how they are intended to be used.
|
||||
*
|
||||
* `ShardingConsumerController` is the entity that is initialized in `ClusterSharding`. It will manage
|
||||
* the lifecycle and message delivery to the destination consumer actor.
|
||||
*
|
||||
* The destination consumer actor will start the flow by sending an initial [[ConsumerController.Start]]
|
||||
* message via the `ActorRef` provided in the factory function of the consumer `Behavior`.
|
||||
* The `ActorRef` in the `Start` message is typically constructed as a message adapter to map the
|
||||
* [[ConsumerController.Delivery]] to the protocol of the consumer actor.
|
||||
*
|
||||
* Received messages from the producer are wrapped in [[ConsumerController.Delivery]] when sent to the consumer,
|
||||
* which is supposed to reply with [[ConsumerController.Confirmed]] when it has processed the message.
|
||||
* The next message from a specific producer is not delivered until the previous one is confirmed. However, since
|
||||
* there can be several producers, e.g. one per node, sending to the same destination entity there can be
|
||||
* several `Delivery` in flight at the same time.
|
||||
* More messages from a specific producer that arrive while waiting for the confirmation are stashed by
|
||||
* the `ConsumerController` and delivered when the previous message has been confirmed.
|
||||
*/
|
||||
@ApiMayChange
|
||||
object ShardingConsumerController {
|
||||
|
||||
object Settings {
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from config `akka.reliable-delivery.sharding.consumer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def apply(system: ActorSystem[_]): Settings =
|
||||
apply(system.settings.config.getConfig("akka.reliable-delivery.sharding.consumer-controller"))
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.sharding.consumer-controller`.
|
||||
*/
|
||||
def apply(config: Config): Settings = {
|
||||
new Settings(bufferSize = config.getInt("buffer-size"), ConsumerController.Settings(config))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: Factory method from config `akka.reliable-delivery.sharding.consumer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def create(system: ActorSystem[_]): Settings =
|
||||
apply(system)
|
||||
|
||||
/**
|
||||
* Java API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.sharding.consumer-controller`.
|
||||
*/
|
||||
def create(config: Config): Settings =
|
||||
apply(config)
|
||||
}
|
||||
|
||||
final class Settings private (val bufferSize: Int, val consumerControllerSettings: ConsumerController.Settings) {
|
||||
|
||||
def withBufferSize(newBufferSize: Int): Settings =
|
||||
copy(bufferSize = newBufferSize)
|
||||
|
||||
def withConsumerControllerSettings(newConsumerControllerSettings: ConsumerController.Settings): Settings =
|
||||
copy(consumerControllerSettings = newConsumerControllerSettings)
|
||||
|
||||
/**
|
||||
* Private copy method for internal use only.
|
||||
*/
|
||||
private def copy(
|
||||
bufferSize: Int = bufferSize,
|
||||
consumerControllerSettings: ConsumerController.Settings = consumerControllerSettings) =
|
||||
new Settings(bufferSize, consumerControllerSettings)
|
||||
|
||||
override def toString: String =
|
||||
s"Settings($bufferSize,$consumerControllerSettings)"
|
||||
}
|
||||
|
||||
/**
|
||||
* The `Behavior` of the entity that is to be initialized in `ClusterSharding`. It will manage
|
||||
* the lifecycle and message delivery to the destination consumer actor.
|
||||
*/
|
||||
def apply[A, B](consumerBehavior: ActorRef[ConsumerController.Start[A]] => Behavior[B])
|
||||
: Behavior[ConsumerController.SequencedMessage[A]] = {
|
||||
Behaviors.setup { context =>
|
||||
ShardingConsumerControllerImpl(consumerBehavior, Settings(context.system))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The `Behavior` of the entity that is to be initialized in `ClusterSharding`. It will manage
|
||||
* the lifecycle and message delivery to the destination consumer actor.
|
||||
*/
|
||||
def withSettings[A, B](settings: Settings)(consumerBehavior: ActorRef[ConsumerController.Start[A]] => Behavior[B])
|
||||
: Behavior[ConsumerController.SequencedMessage[A]] = {
|
||||
// can't overload apply, that would lose type inference
|
||||
ShardingConsumerControllerImpl(consumerBehavior, settings)
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: The `Behavior` of the entity that is to be initialized in `ClusterSharding`. It will manage
|
||||
* the lifecycle and message delivery to the destination consumer actor.
|
||||
*/
|
||||
def create[A, B](consumerBehavior: JFunction[ActorRef[ConsumerController.Start[A]], Behavior[B]])
|
||||
: Behavior[ConsumerController.SequencedMessage[A]] =
|
||||
apply(consumerBehavior.apply)
|
||||
|
||||
/**
|
||||
* Java API: The `Behavior` of the entity that is to be initialized in `ClusterSharding`. It will manage
|
||||
* the lifecycle and message delivery to the destination consumer actor.
|
||||
*/
|
||||
def create[A, B](
|
||||
consumerBehavior: JFunction[ActorRef[ConsumerController.Start[A]], Behavior[B]],
|
||||
settings: Settings): Behavior[ConsumerController.SequencedMessage[A]] =
|
||||
withSettings(settings)(consumerBehavior.apply)
|
||||
|
||||
/**
|
||||
* Java API: The generic `Class` type for `ConsumerController.SequencedMessage` that can be used when creating
|
||||
* an `EntityTypeKey` for the `ShardingConsumerController` with
|
||||
* `Class<EntityTypeKey<ConsumerController.SequencedMessage<MessageType>>>`.
|
||||
*/
|
||||
def entityTypeKeyClass[A]: Class[ConsumerController.SequencedMessage[A]] =
|
||||
classOf[ConsumerController.SequencedMessage[A]]
|
||||
|
||||
}
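A minimal sketch of wiring this entity into Cluster Sharding (illustrative only, not part of the diff; the `Job` type, the `jobConsumer` behavior factory and the entity type key name "job" are assumptions):

import akka.actor.typed.ActorRef
import akka.actor.typed.ActorSystem
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ConsumerController
import akka.cluster.sharding.typed.ShardingEnvelope
import akka.cluster.sharding.typed.scaladsl.ClusterSharding
import akka.cluster.sharding.typed.scaladsl.Entity
import akka.cluster.sharding.typed.scaladsl.EntityTypeKey

final case class Job(payload: String) // hypothetical message type

// jobConsumer stands in for the real consumer Behavior; it receives the ActorRef to which it
// must send ConsumerController.Start, as described in the documentation above.
def initJobEntity(
    system: ActorSystem[_],
    jobConsumer: ActorRef[ConsumerController.Start[Job]] => Behavior[ConsumerController.Delivery[Job]])
    : ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[Job]]] = {
  val typeKey = EntityTypeKey[ConsumerController.SequencedMessage[Job]]("job")
  // one ShardingConsumerController per entity; it spawns and manages the destination consumer
  ClusterSharding(system).init(Entity(typeKey)(_ => ShardingConsumerController(jobConsumer)))
}

The returned region `ActorRef` is what the `ShardingProducerController` on the sending side is constructed with.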
@@ -0,0 +1,286 @@
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding.typed.delivery
|
||||
|
||||
import java.util.Optional
|
||||
|
||||
import scala.compat.java8.OptionConverters._
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
import scala.reflect.ClassTag
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.ActorSystem
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.DurableProducerQueue
|
||||
import akka.actor.typed.delivery.ProducerController
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.cluster.sharding.typed.ShardingEnvelope
|
||||
import akka.cluster.sharding.typed.delivery.internal.ShardingProducerControllerImpl
|
||||
import akka.util.JavaDurationConverters._
|
||||
import com.typesafe.config.Config
|
||||
|
||||
/**
|
||||
* Reliable delivery between a producer actor sending messages to sharded consumer
|
||||
* actors receiving the messages.
|
||||
*
|
||||
* The `ShardingProducerController` should be used together with [[ShardingConsumerController]].
|
||||
*
|
||||
* A producer can send messages via a `ShardingProducerController` to any `ShardingConsumerController`
|
||||
* identified by an `entityId`. A single `ShardingProducerController` per `ActorSystem` (node) can be
|
||||
* shared for sending to all entities of a certain entity type. No explicit registration is needed
|
||||
* between the `ShardingConsumerController` and `ShardingProducerController`.
|
||||
*
|
||||
* The producer actor will start the flow by sending a [[ShardingProducerController.Start]]
|
||||
* message to the `ShardingProducerController`. The `ActorRef` in the `Start` message is
|
||||
* typically constructed as a message adapter to map the [[ShardingProducerController.RequestNext]]
|
||||
* to the protocol of the producer actor.
|
||||
*
|
||||
* The `ShardingProducerController` sends `RequestNext` to the producer, which is then allowed
|
||||
* to send one message to the `ShardingProducerController` via the `sendNextTo` in the `RequestNext`.
|
||||
* Thereafter the producer will receive a new `RequestNext` when it's allowed to send one more message.
|
||||
*
|
||||
* In the `RequestNext` message there is information about which entities have demand. It is allowed
|
||||
* to send to a new `entityId` that is not included in the `RequestNext.entitiesWithDemand`. If sending to
|
||||
* an entity that doesn't have demand, the message will be buffered. This support for buffering means that
|
||||
* it is even allowed to send several messages in response to one `RequestNext` but it's recommended to
|
||||
* only send one message and wait for the next `RequestNext` before sending more messages.
|
||||
*
|
||||
* The producer and `ShardingProducerController` actors are supposed to be local so that these messages are
|
||||
* fast and not lost. This is enforced by a runtime check.
|
||||
*
|
||||
* There will be one `ShardingConsumerController` for each entity. Many unconfirmed messages can be in
|
||||
* flight between the `ShardingProducerController` and each `ShardingConsumerController`. The flow control
|
||||
* is driven by the consumer side, which means that the `ShardingProducerController` will not send faster
|
||||
* than the demand requested by the consumers.
|
||||
*
|
||||
* Lost messages are detected, resent and deduplicated if needed. This is also driven by the consumer side,
|
||||
* which means that the `ShardingProducerController` will not push resends unless requested by the
|
||||
* `ShardingConsumerController`.
|
||||
*
|
||||
* Until sent messages have been confirmed the `ShardingProducerController` keeps them in memory to be able to
|
||||
* resend them. If the JVM of the `ShardingProducerController` crashes those unconfirmed messages are lost.
|
||||
* To make sure the messages can be delivered also in that scenario the `ShardingProducerController` can be
|
||||
* used with a [[DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so
|
||||
* that they can be redelivered when the producer is started again. An implementation of the
|
||||
* `DurableProducerQueue` is provided by `EventSourcedProducerQueue` in `akka-persistence-typed`.
|
||||
*
|
||||
* Instead of using `tell` with the `sendNextTo` in the `RequestNext` the producer can use `context.ask`
|
||||
* with the `askNextTo` in the `RequestNext`. The difference is that a reply is sent back when the
|
||||
* message has been handled. If a `DurableProducerQueue` is used then the reply is sent when the message
|
||||
* has been stored successfully, but it might not have been processed by the consumer yet. Otherwise the
|
||||
* reply is sent after the consumer has processed and confirmed the message.
|
||||
*
|
||||
* It's also possible to use the `ShardingProducerController` and `ShardingConsumerController` without resending
|
||||
* lost messages, but the flow control is still used. This can be more efficient since messages
|
||||
* don't have to be kept in memory in the `ProducerController` until they have been
|
||||
* confirmed, but the drawback is that lost messages will not be delivered. See configuration
|
||||
* `only-flow-control` of the `ShardingConsumerController`.
|
||||
*
|
||||
* The `producerId` is used in logging and included as MDC entry with key `"producerId"`. It's propagated
|
||||
* to the `ConsumerController` and is useful for correlating log messages. It can be any `String` but it's
|
||||
* recommended to use a unique identifier representing the producer.
|
||||
*/
|
||||
@ApiMayChange // TODO #28719 when removing ApiMayChange consider removing `case class` for some of the messages
|
||||
object ShardingProducerController {
|
||||
|
||||
import ShardingProducerControllerImpl.UnsealedInternalCommand
|
||||
|
||||
type EntityId = String
|
||||
|
||||
sealed trait Command[A] extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* Initial message from the producer actor. The `producer` is typically constructed
|
||||
* as a message adapter to map the [[RequestNext]] to the protocol of the producer actor.
|
||||
*
|
||||
* If the producer is restarted it should send a new `Start` message to the
|
||||
* `ShardingProducerController`.
|
||||
*/
|
||||
final case class Start[A](producer: ActorRef[RequestNext[A]]) extends Command[A]
|
||||
|
||||
/**
|
||||
* For sending confirmation message back to the producer when the message has been confirmed.
|
||||
* Typically used with `context.ask` from the producer.
|
||||
*
|
||||
* If `DurableProducerQueue` is used the confirmation reply is sent when the message has been
|
||||
* successfully stored, meaning that the actual delivery to the consumer may happen later.
|
||||
* If `DurableProducerQueue` is not used the confirmation reply is sent when the message has been
|
||||
* fully delivered, processed, and confirmed by the consumer.
|
||||
*/
|
||||
final case class MessageWithConfirmation[A](entityId: EntityId, message: A, replyTo: ActorRef[Done])
|
||||
extends UnsealedInternalCommand
|
||||
|
||||
/**
|
||||
* The `ProducerController` sends `RequestNext` to the producer when it is allowed to send one
|
||||
* message via the `sendNextTo` or `askNextTo`. It should wait for next `RequestNext` before
|
||||
* sending one more message.
|
||||
*
|
||||
* `entitiesWithDemand` contains information about which entities have demand. It is allowed
|
||||
* to send to a new `entityId` that is not included in the `entitiesWithDemand`. If sending to
|
||||
* an entity that doesn't have demand, the message will be buffered, and that can be seen in the
|
||||
* `bufferedForEntitiesWithoutDemand`.
|
||||
*
|
||||
* This support for buffering means that it is even allowed to send several messages in response
|
||||
* to one `RequestNext`, but it's recommended to only send one message and wait for the next `RequestNext`
|
||||
* before sending more messages.
|
||||
*/
|
||||
final case class RequestNext[A](
|
||||
sendNextTo: ActorRef[ShardingEnvelope[A]],
|
||||
askNextTo: ActorRef[MessageWithConfirmation[A]],
|
||||
entitiesWithDemand: Set[EntityId],
|
||||
bufferedForEntitiesWithoutDemand: Map[EntityId, Int]) {
|
||||
|
||||
/** Java API */
|
||||
def getEntitiesWithDemand: java.util.Set[String] = {
|
||||
import akka.util.ccompat.JavaConverters._
|
||||
entitiesWithDemand.asJava
|
||||
}
|
||||
|
||||
/** Java API */
|
||||
def getBufferedForEntitiesWithoutDemand: java.util.Map[String, Integer] = {
|
||||
import akka.util.ccompat.JavaConverters._
|
||||
bufferedForEntitiesWithoutDemand.iterator.map { case (k, v) => k -> v.asInstanceOf[Integer] }.toMap.asJava
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: The generic `Class` type for `ShardingProducerController.RequestNext` that can be used when creating a
|
||||
* `messageAdapter` for `Class<RequestNext<MessageType>>`.
|
||||
*/
|
||||
def requestNextClass[A](): Class[RequestNext[A]] = classOf[RequestNext[A]]
|
||||
|
||||
object Settings {
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from config `akka.reliable-delivery.sharding.producer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def apply(system: ActorSystem[_]): Settings =
|
||||
apply(system.settings.config.getConfig("akka.reliable-delivery.sharding.producer-controller"))
|
||||
|
||||
/**
|
||||
* Scala API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.sharding.producer-controller`.
|
||||
*/
|
||||
def apply(config: Config): Settings = {
|
||||
new Settings(
|
||||
bufferSize = config.getInt("buffer-size"),
|
||||
config.getDuration("internal-ask-timeout").asScala,
|
||||
config.getDuration("cleanup-unused-after").asScala,
|
||||
config.getDuration("resend-first-unconfirmed-idle-timeout").asScala,
|
||||
ProducerController.Settings(config))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API: Factory method from config `akka.reliable-delivery.sharding.producer-controller`
|
||||
* of the `ActorSystem`.
|
||||
*/
|
||||
def create(system: ActorSystem[_]): Settings =
|
||||
apply(system)
|
||||
|
||||
/**
|
||||
* Java API: Factory method from Config corresponding to
|
||||
* `akka.reliable-delivery.sharding.producer-controller`.
|
||||
*/
|
||||
def create(config: Config): Settings =
|
||||
apply(config)
|
||||
}
|
||||
|
||||
final class Settings private (
|
||||
val bufferSize: Int,
|
||||
val internalAskTimeout: FiniteDuration,
|
||||
val cleanupUnusedAfter: FiniteDuration,
|
||||
val resendFirsUnconfirmedIdleTimeout: FiniteDuration,
|
||||
val producerControllerSettings: ProducerController.Settings) {
|
||||
|
||||
def withBufferSize(newBufferSize: Int): Settings =
|
||||
copy(bufferSize = newBufferSize)
|
||||
|
||||
def withInternalAskTimeout(newInternalAskTimeout: FiniteDuration): Settings =
|
||||
copy(internalAskTimeout = newInternalAskTimeout)
|
||||
|
||||
def withInternalAskTimeout(newInternalAskTimeout: java.time.Duration): Settings =
|
||||
copy(internalAskTimeout = newInternalAskTimeout.asScala)
|
||||
|
||||
def withCleanupUnusedAfter(newCleanupUnusedAfter: FiniteDuration): Settings =
|
||||
copy(cleanupUnusedAfter = newCleanupUnusedAfter)
|
||||
|
||||
def withCleanupUnusedAfter(newCleanupUnusedAfter: java.time.Duration): Settings =
|
||||
copy(cleanupUnusedAfter = newCleanupUnusedAfter.asScala)
|
||||
|
||||
def withResendFirsUnconfirmedIdleTimeout(newResendFirsUnconfirmedIdleTimeout: FiniteDuration): Settings =
|
||||
copy(resendFirsUnconfirmedIdleTimeout = newResendFirsUnconfirmedIdleTimeout)
|
||||
|
||||
def withResendFirsUnconfirmedIdleTimeout(newResendFirsUnconfirmedIdleTimeout: java.time.Duration): Settings =
|
||||
copy(resendFirsUnconfirmedIdleTimeout = newResendFirsUnconfirmedIdleTimeout.asScala)
|
||||
|
||||
def withProducerControllerSettings(newProducerControllerSettings: ProducerController.Settings): Settings =
|
||||
copy(producerControllerSettings = newProducerControllerSettings)
|
||||
|
||||
/**
|
||||
* Private copy method for internal use only.
|
||||
*/
|
||||
private def copy(
|
||||
bufferSize: Int = bufferSize,
|
||||
internalAskTimeout: FiniteDuration = internalAskTimeout,
|
||||
cleanupUnusedAfter: FiniteDuration = cleanupUnusedAfter,
|
||||
resendFirsUnconfirmedIdleTimeout: FiniteDuration = resendFirsUnconfirmedIdleTimeout,
|
||||
producerControllerSettings: ProducerController.Settings = producerControllerSettings) =
|
||||
new Settings(
|
||||
bufferSize,
|
||||
internalAskTimeout,
|
||||
cleanupUnusedAfter,
|
||||
resendFirsUnconfirmedIdleTimeout,
|
||||
producerControllerSettings)
|
||||
|
||||
override def toString: String =
|
||||
s"Settings($bufferSize,$internalAskTimeout,$resendFirsUnconfirmedIdleTimeout,$producerControllerSettings)"
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]]): Behavior[Command[A]] = {
|
||||
Behaviors.setup { context =>
|
||||
ShardingProducerControllerImpl(producerId, region, durableQueueBehavior, Settings(context.system))
|
||||
}
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
ShardingProducerControllerImpl(producerId, region, durableQueueBehavior, settings)
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](
|
||||
messageClass: Class[A],
|
||||
producerId: String,
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueueBehavior: Optional[Behavior[DurableProducerQueue.Command[A]]]): Behavior[Command[A]] = {
|
||||
apply(producerId, region, durableQueueBehavior.asScala)(ClassTag(messageClass))
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
def create[A](
|
||||
messageClass: Class[A],
|
||||
producerId: String,
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueueBehavior: Optional[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: Settings): Behavior[Command[A]] = {
|
||||
apply(producerId, region, durableQueueBehavior.asScala, settings)(ClassTag(messageClass))
|
||||
}
|
||||
|
||||
// TODO maybe there is a need for variant taking message extractor instead of ShardingEnvelope
|
||||
}
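A minimal producer-side sketch (illustrative only, not part of the diff; the `Job` type, the producer id, the entity id and the actor names are assumptions). It shows the `Start` registration via a message adapter and sending one message per `RequestNext` through `sendNextTo`; `askNextTo` with `MessageWithConfirmation` would be used instead when a confirmation reply is wanted.

import akka.actor.typed.ActorRef
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.typed.ShardingEnvelope
import akka.cluster.sharding.typed.delivery.ShardingProducerController

final case class Job(payload: String) // hypothetical message type

sealed trait ProducerCommand
final case class WrappedRequestNext(next: ShardingProducerController.RequestNext[Job]) extends ProducerCommand

def producer(
    region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[Job]]]): Behavior[ProducerCommand] =
  Behaviors.setup { context =>
    val controller = context.spawn(
      ShardingProducerController[Job]("job-producer", region, durableQueueBehavior = None),
      "shardingProducerController")
    // adapt RequestNext into the producer's own protocol
    val requestNextAdapter =
      context.messageAdapter[ShardingProducerController.RequestNext[Job]](WrappedRequestNext.apply)
    controller ! ShardingProducerController.Start(requestNextAdapter)

    Behaviors.receiveMessage {
      case WrappedRequestNext(next) =>
        // demand from the controller: send one message, addressed by entityId
        next.sendNextTo ! ShardingEnvelope("entity-1", Job("work"))
        Behaviors.same
    }
  }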
@@ -0,0 +1,131 @@
/*
|
||||
* Copyright (C) 2020-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding.typed.delivery.internal
|
||||
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.Terminated
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.internal.ConsumerControllerImpl
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.InternalApi
|
||||
import akka.cluster.sharding.typed.delivery.ShardingConsumerController
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] object ShardingConsumerControllerImpl {
|
||||
def apply[A, B](
|
||||
consumerBehavior: ActorRef[ConsumerController.Start[A]] => Behavior[B],
|
||||
settings: ShardingConsumerController.Settings): Behavior[ConsumerController.SequencedMessage[A]] = {
|
||||
Behaviors
|
||||
.setup[ConsumerController.Command[A]] { context =>
|
||||
context.setLoggerName("akka.cluster.sharding.typed.delivery.ShardingConsumerController")
|
||||
val consumer = context.spawn(consumerBehavior(context.self), name = "consumer")
|
||||
context.watch(consumer)
|
||||
waitForStart(context, settings, consumer)
|
||||
}
|
||||
.narrow
|
||||
}
|
||||
|
||||
private def waitForStart[A](
|
||||
context: ActorContext[ConsumerController.Command[A]],
|
||||
settings: ShardingConsumerController.Settings,
|
||||
consumer: ActorRef[_]): Behavior[ConsumerController.Command[A]] = {
|
||||
Behaviors.withStash(settings.bufferSize) { stashBuffer =>
|
||||
Behaviors
|
||||
.receiveMessage[ConsumerController.Command[A]] {
|
||||
case start: ConsumerController.Start[A] =>
|
||||
ConsumerControllerImpl.enforceLocalConsumer(start.deliverTo)
|
||||
context.unwatch(consumer)
|
||||
context.watch(start.deliverTo)
|
||||
stashBuffer.unstashAll(
|
||||
new ShardingConsumerControllerImpl[A](context, start.deliverTo, settings).active(Map.empty, Map.empty))
|
||||
case other =>
|
||||
stashBuffer.stash(other)
|
||||
Behaviors.same
|
||||
}
|
||||
.receiveSignal {
|
||||
case (_, Terminated(`consumer`)) =>
|
||||
context.log.debug("Consumer terminated before initialized.")
|
||||
Behaviors.stopped
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private class ShardingConsumerControllerImpl[A](
|
||||
context: ActorContext[ConsumerController.Command[A]],
|
||||
deliverTo: ActorRef[ConsumerController.Delivery[A]],
|
||||
settings: ShardingConsumerController.Settings) {
|
||||
|
||||
def active(
|
||||
// ProducerController -> producerId
|
||||
producerControllers: Map[ActorRef[ProducerControllerImpl.InternalCommand], String],
|
||||
// producerId -> ConsumerController
|
||||
consumerControllers: Map[String, ActorRef[ConsumerController.Command[A]]])
|
||||
: Behavior[ConsumerController.Command[A]] = {
|
||||
|
||||
Behaviors
|
||||
.receiveMessagePartial[ConsumerController.Command[A]] {
|
||||
case seqMsg: ConsumerController.SequencedMessage[A] =>
|
||||
def updatedProducerControllers(): Map[ActorRef[ProducerControllerImpl.InternalCommand], String] = {
|
||||
producerControllers.get(seqMsg.producerController) match {
|
||||
case Some(_) =>
|
||||
producerControllers
|
||||
case None =>
|
||||
context.watch(seqMsg.producerController)
|
||||
producerControllers.updated(seqMsg.producerController, seqMsg.producerId)
|
||||
}
|
||||
}
|
||||
|
||||
consumerControllers.get(seqMsg.producerId) match {
|
||||
case Some(c) =>
|
||||
c ! seqMsg
|
||||
active(updatedProducerControllers(), consumerControllers)
|
||||
case None =>
|
||||
context.log.debug("Starting ConsumerController for producerId [{}].", seqMsg.producerId)
|
||||
val cc = context.spawn(
|
||||
ConsumerController[A](settings.consumerControllerSettings),
|
||||
s"consumerController-${seqMsg.producerId}")
|
||||
context.watch(cc)
|
||||
cc ! ConsumerController.Start(deliverTo)
|
||||
cc ! seqMsg
|
||||
active(updatedProducerControllers(), consumerControllers.updated(seqMsg.producerId, cc))
|
||||
}
|
||||
}
|
||||
.receiveSignal {
|
||||
case (_, Terminated(`deliverTo`)) =>
|
||||
context.log.debug("Consumer terminated.")
|
||||
Behaviors.stopped
|
||||
case (_, Terminated(ref)) =>
|
||||
val producerControllerRef = ref.unsafeUpcast[ProducerControllerImpl.InternalCommand]
|
||||
producerControllers.get(producerControllerRef) match {
|
||||
case Some(producerId) =>
|
||||
context.log.debug("ProducerController for producerId [{}] terminated.", producerId)
|
||||
val newControllers = producerControllers - producerControllerRef
|
||||
consumerControllers.get(producerId).foreach { cc =>
|
||||
cc ! ConsumerController.DeliverThenStop()
|
||||
}
|
||||
active(newControllers, consumerControllers)
|
||||
case None =>
|
||||
consumerControllers.find { case (_, cc) => ref == cc } match {
|
||||
case Some((producerId, _)) =>
|
||||
context.log.debug("ConsumerController for producerId [{}] terminated.", producerId)
|
||||
val newControllers = consumerControllers - producerId
|
||||
active(producerControllers, newControllers)
|
||||
case None =>
|
||||
context.log.debug("Unknown {} terminated.", ref)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
@@ -0,0 +1,598 @@
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding.typed.delivery.internal
|
||||
|
||||
import java.util.concurrent.TimeoutException
|
||||
|
||||
import scala.reflect.ClassTag
|
||||
import scala.util.Failure
|
||||
import scala.util.Success
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.DurableProducerQueue
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.ConfirmationQualifier
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.SeqNr
|
||||
import akka.actor.typed.delivery.ProducerController
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.scaladsl.ActorContext
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
import akka.actor.typed.scaladsl.StashBuffer
|
||||
import akka.annotation.InternalApi
|
||||
import akka.cluster.sharding.typed.ShardingEnvelope
|
||||
import akka.cluster.sharding.typed.delivery.ShardingProducerController
|
||||
import akka.util.Timeout
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] object ShardingProducerControllerImpl {
|
||||
|
||||
import ShardingProducerController.Command
|
||||
import ShardingProducerController.EntityId
|
||||
import ShardingProducerController.RequestNext
|
||||
import ShardingProducerController.Start
|
||||
|
||||
sealed trait InternalCommand
|
||||
|
||||
/** For commands defined in public ShardingProducerController */
|
||||
trait UnsealedInternalCommand extends InternalCommand
|
||||
|
||||
private type TotalSeqNr = Long
|
||||
private type OutSeqNr = Long
|
||||
private type OutKey = String
|
||||
|
||||
private final case class Ack(outKey: OutKey, confirmedSeqNr: OutSeqNr) extends InternalCommand
|
||||
private final case class AskTimeout(outKey: OutKey, outSeqNr: OutSeqNr) extends InternalCommand
|
||||
|
||||
private final case class WrappedRequestNext[A](next: ProducerController.RequestNext[A]) extends InternalCommand
|
||||
|
||||
private final case class Msg[A](envelope: ShardingEnvelope[A], alreadyStored: TotalSeqNr) extends InternalCommand {
|
||||
def isAlreadyStored: Boolean = alreadyStored > 0
|
||||
}
|
||||
|
||||
private case class LoadStateReply[A](state: DurableProducerQueue.State[A]) extends InternalCommand
|
||||
private case class LoadStateFailed(attempt: Int) extends InternalCommand
|
||||
private case class StoreMessageSentReply(ack: DurableProducerQueue.StoreMessageSentAck)
|
||||
private case class StoreMessageSentFailed[A](messageSent: DurableProducerQueue.MessageSent[A], attempt: Int)
|
||||
extends InternalCommand
|
||||
private case class StoreMessageSentCompleted[A](messageSent: DurableProducerQueue.MessageSent[A])
|
||||
extends InternalCommand
|
||||
private case object DurableQueueTerminated extends InternalCommand
|
||||
|
||||
private case object ResendFirstUnconfirmed extends InternalCommand
|
||||
private case object CleanupUnused extends InternalCommand
|
||||
|
||||
private final case class OutState[A](
|
||||
entityId: EntityId,
|
||||
producerController: ActorRef[ProducerController.Command[A]],
|
||||
nextTo: Option[ProducerController.RequestNext[A]],
|
||||
buffered: Vector[Buffered[A]],
|
||||
seqNr: OutSeqNr,
|
||||
unconfirmed: Vector[Unconfirmed[A]],
|
||||
usedNanoTime: Long) {
|
||||
if (nextTo.nonEmpty && buffered.nonEmpty)
|
||||
throw new IllegalStateException("nextTo and buffered shouldn't both be nonEmpty.")
|
||||
}
|
||||
|
||||
private final case class Buffered[A](totalSeqNr: TotalSeqNr, msg: A, replyTo: Option[ActorRef[Done]])
|
||||
|
||||
private final case class Unconfirmed[A](totalSeqNr: TotalSeqNr, outSeqNr: OutSeqNr, replyTo: Option[ActorRef[Done]])
|
||||
|
||||
private final case class State[A](
|
||||
currentSeqNr: TotalSeqNr,
|
||||
producer: ActorRef[ShardingProducerController.RequestNext[A]],
|
||||
out: Map[OutKey, OutState[A]],
|
||||
// replyAfterStore is used when durableQueue is enabled, otherwise replies are tracked in OutState
|
||||
replyAfterStore: Map[TotalSeqNr, ActorRef[Done]]) {
|
||||
|
||||
def bufferSize: Long = {
|
||||
out.valuesIterator.foldLeft(0L) { case (acc, outState) => acc + outState.buffered.size }
|
||||
}
|
||||
}
|
||||
|
||||
def apply[A: ClassTag](
|
||||
producerId: String,
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: ShardingProducerController.Settings): Behavior[Command[A]] = {
|
||||
Behaviors
|
||||
.withStash[InternalCommand](settings.bufferSize) { stashBuffer =>
|
||||
Behaviors.setup[InternalCommand] { context =>
|
||||
Behaviors.withMdc(staticMdc = Map("producerId" -> producerId)) {
|
||||
context.setLoggerName("akka.cluster.sharding.typed.delivery.ShardingProducerController")
|
||||
|
||||
val durableQueue = askLoadState(context, durableQueueBehavior, settings)
|
||||
|
||||
waitingForStart(
|
||||
producerId,
|
||||
context,
|
||||
stashBuffer,
|
||||
region,
|
||||
durableQueue,
|
||||
None,
|
||||
createInitialState(durableQueue.nonEmpty),
|
||||
settings)
|
||||
}
|
||||
}
|
||||
}
|
||||
.narrow
|
||||
}
|
||||
|
||||
private def createInitialState[A: ClassTag](hasDurableQueue: Boolean) = {
|
||||
if (hasDurableQueue) None else Some(DurableProducerQueue.State.empty[A])
|
||||
}
|
||||
|
||||
private def waitingForStart[A: ClassTag](
|
||||
producerId: String,
|
||||
context: ActorContext[InternalCommand],
|
||||
stashBuffer: StashBuffer[InternalCommand],
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
producer: Option[ActorRef[RequestNext[A]]],
|
||||
initialState: Option[DurableProducerQueue.State[A]],
|
||||
settings: ShardingProducerController.Settings): Behavior[InternalCommand] = {
|
||||
|
||||
def becomeActive(p: ActorRef[RequestNext[A]], s: DurableProducerQueue.State[A]): Behavior[InternalCommand] = {
|
||||
Behaviors.withTimers { timers =>
|
||||
timers.startTimerWithFixedDelay(CleanupUnused, settings.cleanupUnusedAfter / 2)
|
||||
timers.startTimerWithFixedDelay(ResendFirstUnconfirmed, settings.resendFirsUnconfirmedIdleTimeout / 2)
|
||||
|
||||
// resend unconfirmed before other stashed messages
|
||||
Behaviors.withStash[InternalCommand](settings.bufferSize) { newStashBuffer =>
|
||||
Behaviors.setup { _ =>
|
||||
s.unconfirmed.foreach { m =>
|
||||
newStashBuffer.stash(Msg(ShardingEnvelope(m.confirmationQualifier, m.message), alreadyStored = m.seqNr))
|
||||
}
|
||||
// append other stashed messages after the unconfirmed
|
||||
stashBuffer.foreach(newStashBuffer.stash)
|
||||
|
||||
val msgAdapter: ActorRef[ShardingEnvelope[A]] = context.messageAdapter(msg => Msg(msg, alreadyStored = 0))
|
||||
if (s.unconfirmed.isEmpty)
|
||||
p ! RequestNext(msgAdapter, context.self, Set.empty, Map.empty)
|
||||
val b = new ShardingProducerControllerImpl(context, producerId, msgAdapter, region, durableQueue, settings)
|
||||
.active(State(s.currentSeqNr, p, Map.empty, Map.empty))
|
||||
|
||||
newStashBuffer.unstashAll(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Behaviors.receiveMessage {
|
||||
case start: Start[A] @unchecked =>
|
||||
ProducerControllerImpl.enforceLocalProducer(start.producer)
|
||||
initialState match {
|
||||
case Some(s) =>
|
||||
becomeActive(start.producer, s)
|
||||
case None =>
|
||||
// waiting for LoadStateReply
|
||||
waitingForStart(
|
||||
producerId,
|
||||
context,
|
||||
stashBuffer,
|
||||
region,
|
||||
durableQueue,
|
||||
Some(start.producer),
|
||||
initialState,
|
||||
settings)
|
||||
}
|
||||
|
||||
case load: LoadStateReply[A] @unchecked =>
|
||||
producer match {
|
||||
case Some(p) =>
|
||||
becomeActive(p, load.state)
|
||||
case None =>
|
||||
// waiting for LoadStateReply
|
||||
waitingForStart(
|
||||
producerId,
|
||||
context,
|
||||
stashBuffer,
|
||||
region,
|
||||
durableQueue,
|
||||
producer,
|
||||
Some(load.state),
|
||||
settings)
|
||||
}
|
||||
|
||||
case LoadStateFailed(attempt) =>
|
||||
if (attempt >= settings.producerControllerSettings.durableQueueRetryAttempts) {
|
||||
val errorMessage = s"LoadState failed after [$attempt] attempts, giving up."
|
||||
context.log.error(errorMessage)
|
||||
throw new TimeoutException(errorMessage)
|
||||
} else {
|
||||
context.log.warn(
|
||||
"LoadState failed, attempt [{}] of [{}], retrying.",
|
||||
attempt,
|
||||
settings.producerControllerSettings.durableQueueRetryAttempts)
|
||||
// retry
|
||||
askLoadState(context, durableQueue, settings, attempt + 1)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
case DurableQueueTerminated =>
|
||||
throw new IllegalStateException("DurableQueue was unexpectedly terminated.")
|
||||
|
||||
case other =>
|
||||
checkStashFull(stashBuffer)
|
||||
stashBuffer.stash(other)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
private def checkStashFull[A: ClassTag](stashBuffer: StashBuffer[InternalCommand]): Unit = {
|
||||
if (stashBuffer.isFull)
|
||||
throw new IllegalArgumentException(s"Buffer is full, size [${stashBuffer.size}].")
|
||||
}
|
||||
|
||||
private def askLoadState[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
durableQueueBehavior: Option[Behavior[DurableProducerQueue.Command[A]]],
|
||||
settings: ShardingProducerController.Settings): Option[ActorRef[DurableProducerQueue.Command[A]]] = {
|
||||
|
||||
durableQueueBehavior.map { b =>
|
||||
val ref = context.spawn(b, "durable")
|
||||
context.watchWith(ref, DurableQueueTerminated)
|
||||
askLoadState(context, Some(ref), settings, attempt = 1)
|
||||
ref
|
||||
}
|
||||
}
|
||||
|
||||
private def askLoadState[A: ClassTag](
|
||||
context: ActorContext[InternalCommand],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: ShardingProducerController.Settings,
|
||||
attempt: Int): Unit = {
|
||||
implicit val loadTimeout: Timeout = settings.producerControllerSettings.durableQueueRequestTimeout
|
||||
durableQueue.foreach { ref =>
|
||||
context.ask[DurableProducerQueue.LoadState[A], DurableProducerQueue.State[A]](
|
||||
ref,
|
||||
askReplyTo => DurableProducerQueue.LoadState[A](askReplyTo)) {
|
||||
case Success(s) => LoadStateReply(s)
|
||||
case Failure(_) => LoadStateFailed(attempt) // timeout
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private class ShardingProducerControllerImpl[A: ClassTag](
|
||||
context: ActorContext[ShardingProducerControllerImpl.InternalCommand],
|
||||
producerId: String,
|
||||
msgAdapter: ActorRef[ShardingEnvelope[A]],
|
||||
region: ActorRef[ShardingEnvelope[ConsumerController.SequencedMessage[A]]],
|
||||
durableQueue: Option[ActorRef[DurableProducerQueue.Command[A]]],
|
||||
settings: ShardingProducerController.Settings) {
|
||||
import DurableProducerQueue.MessageSent
|
||||
import DurableProducerQueue.StoreMessageConfirmed
|
||||
import DurableProducerQueue.StoreMessageSent
|
||||
import DurableProducerQueue.StoreMessageSentAck
|
||||
import ShardingProducerController.EntityId
|
||||
import ShardingProducerController.MessageWithConfirmation
|
||||
import ShardingProducerController.RequestNext
|
||||
import ShardingProducerController.Start
|
||||
import ShardingProducerControllerImpl._
|
||||
|
||||
private val durableQueueAskTimeout: Timeout = settings.producerControllerSettings.durableQueueRequestTimeout
|
||||
private val entityAskTimeout: Timeout = settings.internalAskTimeout
|
||||
|
||||
private val requestNextAdapter: ActorRef[ProducerController.RequestNext[A]] =
|
||||
context.messageAdapter(WrappedRequestNext.apply)
|
||||
|
||||
private def active(s: State[A]): Behavior[InternalCommand] = {
|
||||
|
||||
def onMessage(
|
||||
entityId: EntityId,
|
||||
msg: A,
|
||||
replyTo: Option[ActorRef[Done]],
|
||||
totalSeqNr: TotalSeqNr,
|
||||
newReplyAfterStore: Map[TotalSeqNr, ActorRef[Done]]): Behavior[InternalCommand] = {
|
||||
|
||||
val outKey = s"$producerId-$entityId"
|
||||
val newState =
|
||||
s.out.get(outKey) match {
|
||||
case Some(out @ OutState(_, _, Some(nextTo), _, _, _, _)) =>
|
||||
// there is demand, send immediately
|
||||
send(msg, outKey, out.seqNr, nextTo)
|
||||
val newUnconfirmed = out.unconfirmed :+ Unconfirmed(totalSeqNr, out.seqNr, replyTo)
|
||||
s.copy(
|
||||
out = s.out.updated(
|
||||
outKey,
|
||||
out.copy(
|
||||
seqNr = out.seqNr + 1,
|
||||
nextTo = None,
|
||||
unconfirmed = newUnconfirmed,
|
||||
usedNanoTime = System.nanoTime())),
|
||||
replyAfterStore = newReplyAfterStore)
|
||||
case Some(out @ OutState(_, _, None, buffered, _, _, _)) =>
|
||||
// no demand, buffer
|
||||
if (s.bufferSize >= settings.bufferSize)
|
||||
throw new IllegalArgumentException(s"Buffer is full, size [${settings.bufferSize}].")
|
||||
context.log.debug(
|
||||
"Buffering message to entityId [{}], buffer size for entity [{}]",
|
||||
entityId,
|
||||
buffered.size + 1)
|
||||
val newBuffered = buffered :+ Buffered(totalSeqNr, msg, replyTo)
|
||||
val newS =
|
||||
s.copy(
|
||||
out = s.out.updated(outKey, out.copy(buffered = newBuffered)),
|
||||
replyAfterStore = newReplyAfterStore)
|
||||
// send an updated RequestNext to indicate buffer usage
|
||||
s.producer ! createRequestNext(newS)
|
||||
newS
|
||||
case None =>
|
||||
context.log.debug("Creating ProducerController for entity [{}]", entityId)
|
||||
val send: ConsumerController.SequencedMessage[A] => Unit = { seqMsg =>
|
||||
region ! ShardingEnvelope(entityId, seqMsg)
|
||||
}
|
||||
val p = context.spawn(
|
||||
ProducerController[A](outKey, durableQueueBehavior = None, settings.producerControllerSettings, send),
|
||||
entityId)
|
||||
p ! ProducerController.Start(requestNextAdapter)
|
||||
s.copy(
|
||||
out = s.out.updated(
|
||||
outKey,
|
||||
OutState(
|
||||
entityId,
|
||||
p,
|
||||
None,
|
||||
Vector(Buffered(totalSeqNr, msg, replyTo)),
|
||||
1L,
|
||||
Vector.empty,
|
||||
System.nanoTime())),
|
||||
replyAfterStore = newReplyAfterStore)
|
||||
}
|
||||
|
||||
active(newState)
|
||||
}
|
||||
|
||||
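// Splits the unconfirmed messages at confirmedSeqNr: replies Done where a reply was requested and,
// when a durable queue is used, stores the confirmation (write-behind); returns the remaining unconfirmed.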
def onAck(outState: OutState[A], confirmedSeqNr: OutSeqNr): Vector[Unconfirmed[A]] = {
|
||||
val (confirmed, newUnconfirmed) = outState.unconfirmed.partition {
|
||||
case Unconfirmed(_, seqNr, _) => seqNr <= confirmedSeqNr
|
||||
}
|
||||
|
||||
if (confirmed.nonEmpty) {
|
||||
confirmed.foreach {
|
||||
case Unconfirmed(_, _, None) => // no reply
|
||||
case Unconfirmed(_, _, Some(replyTo)) =>
|
||||
replyTo ! Done
|
||||
}
|
||||
|
||||
durableQueue.foreach { d =>
|
||||
// Storing the confirmedSeqNr can be "write behind", at-least-once delivery
|
||||
d ! StoreMessageConfirmed(confirmed.last.totalSeqNr, outState.entityId, System.currentTimeMillis())
|
||||
}
|
||||
}
|
||||
|
||||
newUnconfirmed
|
||||
}
|
||||
|
||||
def receiveStoreMessageSentCompleted(
|
||||
seqNr: SeqNr,
|
||||
msg: A,
|
||||
entityId: ConfirmationQualifier): Behavior[InternalCommand] = {
|
||||
s.replyAfterStore.get(seqNr).foreach { replyTo =>
|
||||
context.log.info("Confirmation reply to [{}] after storage", seqNr)
|
||||
replyTo ! Done
|
||||
}
|
||||
val newReplyAfterStore = s.replyAfterStore - seqNr
|
||||
|
||||
onMessage(entityId, msg, replyTo = None, seqNr, newReplyAfterStore)
|
||||
}
|
||||
|
||||
def receiveStoreMessageSentFailed(f: StoreMessageSentFailed[A]): Behavior[InternalCommand] = {
|
||||
if (f.attempt >= settings.producerControllerSettings.durableQueueRetryAttempts) {
|
||||
val errorMessage =
|
||||
s"StoreMessageSentFailed seqNr [${f.messageSent.seqNr}] failed after [${f.attempt}] attempts, giving up."
|
||||
context.log.error(errorMessage)
|
||||
throw new TimeoutException(errorMessage)
|
||||
} else {
|
||||
context.log.info("StoreMessageSent seqNr [{}] failed, attempt [{}], retrying.", f.messageSent.seqNr, f.attempt)
|
||||
// retry
|
||||
storeMessageSent(f.messageSent, attempt = f.attempt + 1)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
def receiveAck(ack: Ack): Behavior[InternalCommand] = {
|
||||
s.out.get(ack.outKey) match {
|
||||
case Some(outState) =>
|
||||
context.log.trace2("Received Ack, confirmed [{}], current [{}].", ack.confirmedSeqNr, s.currentSeqNr)
|
||||
val newUnconfirmed = onAck(outState, ack.confirmedSeqNr)
|
||||
val newUsedNanoTime =
|
||||
if (newUnconfirmed.size != outState.unconfirmed.size) System.nanoTime() else outState.usedNanoTime
|
||||
active(
|
||||
s.copy(out =
|
||||
s.out.updated(ack.outKey, outState.copy(unconfirmed = newUnconfirmed, usedNanoTime = newUsedNanoTime))))
|
||||
case None =>
|
||||
// obsolete Ack, ConsumerController already deregistered
|
||||
Behaviors.unhandled
|
||||
}
|
||||
}
|
||||
|
||||
def receiveWrappedRequestNext(w: WrappedRequestNext[A]): Behavior[InternalCommand] = {
|
||||
val next = w.next
|
||||
val outKey = next.producerId
|
||||
s.out.get(outKey) match {
|
||||
case Some(out) =>
|
||||
if (out.nextTo.nonEmpty)
|
||||
throw new IllegalStateException(s"Received RequestNext but already has demand for [$outKey]")
|
||||
|
||||
val confirmedSeqNr = w.next.confirmedSeqNr
|
||||
context.log.trace("Received RequestNext from [{}], confirmed seqNr [{}]", out.entityId, confirmedSeqNr)
|
||||
val newUnconfirmed = onAck(out, confirmedSeqNr)
|
||||
|
||||
if (out.buffered.nonEmpty) {
|
||||
val buf = out.buffered.head
|
||||
send(buf.msg, outKey, out.seqNr, next)
|
||||
val newUnconfirmed2 = newUnconfirmed :+ Unconfirmed(buf.totalSeqNr, out.seqNr, buf.replyTo)
|
||||
val newProducers = s.out.updated(
|
||||
outKey,
|
||||
out.copy(
|
||||
seqNr = out.seqNr + 1,
|
||||
nextTo = None,
|
||||
unconfirmed = newUnconfirmed2,
|
||||
buffered = out.buffered.tail,
|
||||
usedNanoTime = System.nanoTime()))
|
||||
active(s.copy(out = newProducers))
|
||||
} else {
|
||||
val newProducers =
|
||||
s.out.updated(
|
||||
outKey,
|
||||
out.copy(nextTo = Some(next), unconfirmed = newUnconfirmed, usedNanoTime = System.nanoTime()))
|
||||
val newState = s.copy(out = newProducers)
|
||||
// send an updated RequestNext
|
||||
s.producer ! createRequestNext(newState)
|
||||
active(newState)
|
||||
}
|
||||
|
||||
case None =>
|
||||
// could happen if the ProducerController was stopped while a RequestNext was in flight, but should not happen in practice
|
||||
context.log.warn("Received RequestNext for unknown [{}]", outKey)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
def receiveStart(start: Start[A]): Behavior[InternalCommand] = {
|
||||
ProducerControllerImpl.enforceLocalProducer(start.producer)
|
||||
context.log.debug("Register new Producer [{}], currentSeqNr [{}].", start.producer, s.currentSeqNr)
|
||||
start.producer ! createRequestNext(s)
|
||||
active(s.copy(producer = start.producer))
|
||||
}
|
||||
|
||||
def receiveResendFirstUnconfirmed(): Behavior[InternalCommand] = {
|
||||
val now = System.nanoTime()
|
||||
s.out.foreach {
|
||||
case (outKey: OutKey, outState) =>
|
||||
val idleDurationMillis = (now - outState.usedNanoTime) / 1000 / 1000
|
||||
if (outState.unconfirmed.nonEmpty && idleDurationMillis >= settings.resendFirsUnconfirmedIdleTimeout.toMillis) {
|
||||
context.log.debug(
|
||||
"Resend first unconfirmed for [{}], because it was idle for [{} ms]",
|
||||
outKey,
|
||||
idleDurationMillis)
|
||||
outState.producerController
|
||||
.unsafeUpcast[ProducerControllerImpl.InternalCommand] ! ProducerControllerImpl.ResendFirstUnconfirmed
|
||||
}
|
||||
}
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
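// Stops and removes ProducerControllers for entities that have been idle for cleanupUnusedAfter
// and have nothing unconfirmed or buffered.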
def receiveCleanupUnused(): Behavior[InternalCommand] = {
|
||||
val now = System.nanoTime()
|
||||
val removeOutKeys =
|
||||
s.out.flatMap {
|
||||
case (outKey: OutKey, outState) =>
|
||||
val idleDurationMillis = (now - outState.usedNanoTime) / 1000 / 1000
|
||||
if (outState.unconfirmed.isEmpty && outState.buffered.isEmpty && idleDurationMillis >= settings.cleanupUnusedAfter.toMillis) {
|
||||
context.log.debug("Cleanup unused [{}], because it was idle for [{} ms]", outKey, idleDurationMillis)
|
||||
context.stop(outState.producerController)
|
||||
Some(outKey)
|
||||
} else
|
||||
None
|
||||
}
|
||||
if (removeOutKeys.isEmpty)
|
||||
Behaviors.same
|
||||
else
|
||||
active(s.copy(out = s.out -- removeOutKeys))
|
||||
}
|
||||
|
||||
Behaviors.receiveMessage {
|
||||
|
||||
case msg: Msg[A] =>
|
||||
if (durableQueue.isEmpty) {
|
||||
// currentSeqNr is only updated when durableQueue is enabled
|
||||
onMessage(msg.envelope.entityId, msg.envelope.message, None, s.currentSeqNr, s.replyAfterStore)
|
||||
} else if (msg.isAlreadyStored) {
|
||||
// loaded from durable queue, currentSeqNr has already been stored
|
||||
onMessage(msg.envelope.entityId, msg.envelope.message, None, msg.alreadyStored, s.replyAfterStore)
|
||||
} else {
|
||||
storeMessageSent(
|
||||
MessageSent(s.currentSeqNr, msg.envelope.message, false, msg.envelope.entityId, System.currentTimeMillis()),
|
||||
attempt = 1)
|
||||
active(s.copy(currentSeqNr = s.currentSeqNr + 1))
|
||||
}
|
||||
|
||||
case MessageWithConfirmation(entityId, message: A, replyTo) =>
|
||||
if (durableQueue.isEmpty) {
|
||||
onMessage(entityId, message, Some(replyTo), s.currentSeqNr, s.replyAfterStore)
|
||||
} else {
|
||||
storeMessageSent(
|
||||
MessageSent(s.currentSeqNr, message, ack = true, entityId, System.currentTimeMillis()),
|
||||
attempt = 1)
|
||||
val newReplyAfterStore = s.replyAfterStore.updated(s.currentSeqNr, replyTo)
|
||||
active(s.copy(currentSeqNr = s.currentSeqNr + 1, replyAfterStore = newReplyAfterStore))
|
||||
}
|
||||
|
||||
case StoreMessageSentCompleted(MessageSent(seqNr, msg: A, _, entityId, _)) =>
|
||||
receiveStoreMessageSentCompleted(seqNr, msg, entityId)
|
||||
|
||||
case f: StoreMessageSentFailed[A] =>
|
||||
receiveStoreMessageSentFailed(f)
|
||||
|
||||
case ack: Ack =>
|
||||
receiveAck(ack)
|
||||
|
||||
case w: WrappedRequestNext[A] =>
|
||||
receiveWrappedRequestNext(w)
|
||||
|
||||
case ResendFirstUnconfirmed =>
|
||||
receiveResendFirstUnconfirmed()
|
||||
|
||||
case CleanupUnused =>
|
||||
receiveCleanupUnused()
|
||||
|
||||
case start: Start[A] =>
|
||||
receiveStart(start)
|
||||
|
||||
case AskTimeout(outKey, outSeqNr) =>
|
||||
context.log.debug(
|
||||
"Message seqNr [{}] sent to entity [{}] timed out. It will be be redelivered.",
|
||||
outSeqNr,
|
||||
outKey)
|
||||
Behaviors.same
|
||||
|
||||
case DurableQueueTerminated =>
|
||||
throw new IllegalStateException("DurableQueue was unexpectedly terminated.")
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
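// Summarizes current demand for the producer: entities that can receive immediately and,
// for entities without demand, how many messages are buffered.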
private def createRequestNext(s: State[A]): RequestNext[A] = {
|
||||
val entitiesWithDemand = s.out.valuesIterator.collect { case out if out.nextTo.nonEmpty => out.entityId }.toSet
|
||||
val bufferedForEntitesWithoutDemand = s.out.valuesIterator.collect {
|
||||
case out if out.nextTo.isEmpty => out.entityId -> out.buffered.size
|
||||
}.toMap
|
||||
RequestNext(msgAdapter, context.self, entitiesWithDemand, bufferedForEntitesWithoutDemand)
|
||||
}
|
||||
|
||||
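// Delivers via ask to the entity's ProducerController; the confirmation becomes an Ack and a
// failed ask becomes AskTimeout (the message will be redelivered by the ProducerController).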
private def send(msg: A, outKey: OutKey, outSeqNr: OutSeqNr, nextTo: ProducerController.RequestNext[A]): Unit = {
|
||||
if (context.log.isTraceEnabled)
|
||||
context.log.traceN("Sending [{}] to [{}] with outSeqNr [{}].", msg.getClass.getName, outKey, outSeqNr)
|
||||
implicit val askTimeout: Timeout = entityAskTimeout
|
||||
context.ask[ProducerController.MessageWithConfirmation[A], OutSeqNr](
|
||||
nextTo.askNextTo,
|
||||
ProducerController.MessageWithConfirmation(msg, _)) {
|
||||
case Success(seqNr) =>
|
||||
if (seqNr != outSeqNr)
|
||||
context.log.error("Inconsistent Ack seqNr [{}] != [{}]", seqNr, outSeqNr)
|
||||
Ack(outKey, seqNr)
|
||||
case Failure(_) =>
|
||||
AskTimeout(outKey, outSeqNr)
|
||||
}
|
||||
}
|
||||
|
||||
private def storeMessageSent(messageSent: MessageSent[A], attempt: Int): Unit = {
|
||||
implicit val askTimeout: Timeout = durableQueueAskTimeout
|
||||
context.ask[StoreMessageSent[A], StoreMessageSentAck](
|
||||
durableQueue.get,
|
||||
askReplyTo => StoreMessageSent(messageSent, askReplyTo)) {
|
||||
case Success(_) => StoreMessageSentCompleted(messageSent)
|
||||
case Failure(_) => StoreMessageSentFailed(messageSent, attempt) // timeout
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,170 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package jdocs.delivery;
|
||||
|
||||
// #imports
|
||||
import akka.actor.typed.ActorRef;
|
||||
import akka.actor.typed.Behavior;
|
||||
import akka.actor.typed.delivery.ProducerController;
|
||||
import akka.actor.typed.javadsl.AbstractBehavior;
|
||||
import akka.actor.typed.javadsl.ActorContext;
|
||||
import akka.actor.typed.javadsl.Behaviors;
|
||||
import akka.actor.typed.javadsl.Receive;
|
||||
import java.math.BigInteger;
|
||||
import java.util.Optional;
|
||||
|
||||
// #imports
|
||||
|
||||
// #consumer
|
||||
import akka.actor.typed.delivery.ConsumerController;
|
||||
|
||||
// #consumer
|
||||
|
||||
import akka.actor.typed.ActorSystem;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
interface PointToPointDocExample {
|
||||
|
||||
// #producer
|
||||
public class FibonacciProducer extends AbstractBehavior<FibonacciProducer.Command> {
|
||||
|
||||
private long n = 0;
|
||||
private BigInteger b = BigInteger.ONE;
|
||||
private BigInteger a = BigInteger.ZERO;
|
||||
|
||||
interface Command {}
|
||||
|
||||
private static class WrappedRequestNext implements Command {
|
||||
final ProducerController.RequestNext<FibonacciConsumer.Command> next;
|
||||
|
||||
private WrappedRequestNext(ProducerController.RequestNext<FibonacciConsumer.Command> next) {
|
||||
this.next = next;
|
||||
}
|
||||
}
|
||||
|
||||
private FibonacciProducer(ActorContext<Command> context) {
|
||||
super(context);
|
||||
}
|
||||
|
||||
public static Behavior<Command> create(
|
||||
ActorRef<ProducerController.Command<FibonacciConsumer.Command>> producerController) {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
ActorRef<ProducerController.RequestNext<FibonacciConsumer.Command>> requestNextAdapter =
|
||||
context.messageAdapter(
|
||||
ProducerController.requestNextClass(), WrappedRequestNext::new);
|
||||
producerController.tell(new ProducerController.Start<>(requestNextAdapter));
|
||||
|
||||
return new FibonacciProducer(context);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Receive<Command> createReceive() {
|
||||
return newReceiveBuilder()
|
||||
.onMessage(WrappedRequestNext.class, w -> onWrappedRequestNext(w))
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onWrappedRequestNext(WrappedRequestNext w) {
|
||||
getContext().getLog().info("Generated fibonacci {}: {}", n, a);
|
||||
w.next.sendNextTo().tell(new FibonacciConsumer.FibonacciNumber(n, a));
|
||||
|
||||
if (n == 1000) {
|
||||
return Behaviors.stopped();
|
||||
} else {
|
||||
n = n + 1;
// advance the Fibonacci pair: (a, b) -> (b, a + b)
BigInteger next = a.add(b);
a = b;
b = next;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
}
|
||||
// #producer
|
||||
|
||||
// #consumer
|
||||
public class FibonacciConsumer extends AbstractBehavior<FibonacciConsumer.Command> {
|
||||
|
||||
interface Command {}
|
||||
|
||||
public static class FibonacciNumber implements Command {
|
||||
public final long n;
|
||||
public final BigInteger value;
|
||||
|
||||
public FibonacciNumber(long n, BigInteger value) {
|
||||
this.n = n;
|
||||
this.value = value;
|
||||
}
|
||||
}
|
||||
|
||||
private static class WrappedDelivery implements Command {
|
||||
final ConsumerController.Delivery<Command> delivery;
|
||||
|
||||
private WrappedDelivery(ConsumerController.Delivery<Command> delivery) {
|
||||
this.delivery = delivery;
|
||||
}
|
||||
}
|
||||
|
||||
public static Behavior<Command> create(
|
||||
ActorRef<ConsumerController.Command<FibonacciConsumer.Command>> consumerController) {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
ActorRef<ConsumerController.Delivery<FibonacciConsumer.Command>> deliveryAdapter =
|
||||
context.messageAdapter(ConsumerController.deliveryClass(), WrappedDelivery::new);
|
||||
consumerController.tell(new ConsumerController.Start<>(deliveryAdapter));
|
||||
|
||||
return new FibonacciConsumer(context);
|
||||
});
|
||||
}
|
||||
|
||||
private FibonacciConsumer(ActorContext<Command> context) {
|
||||
super(context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Receive<Command> createReceive() {
|
||||
return newReceiveBuilder().onMessage(WrappedDelivery.class, this::onDelivery).build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onDelivery(WrappedDelivery w) {
|
||||
FibonacciNumber number = (FibonacciNumber) w.delivery.message();
|
||||
getContext().getLog().info("Processed fibonacci {}: {}", number.n, number.value);
|
||||
w.delivery.confirmTo().tell(ConsumerController.confirmed());
|
||||
return this;
|
||||
}
|
||||
}
|
||||
// #consumer
|
||||
|
||||
public class Guardian {
|
||||
public static Behavior<Void> create() {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
// #connect
|
||||
ActorRef<ConsumerController.Command<FibonacciConsumer.Command>> consumerController =
|
||||
context.spawn(ConsumerController.create(), "consumerController");
|
||||
context.spawn(FibonacciConsumer.create(consumerController), "consumer");
|
||||
|
||||
String producerId = "fibonacci-" + UUID.randomUUID();
|
||||
ActorRef<ProducerController.Command<FibonacciConsumer.Command>> producerController =
|
||||
context.spawn(
|
||||
ProducerController.create(
|
||||
FibonacciConsumer.Command.class, producerId, Optional.empty()),
|
||||
"producerController");
|
||||
context.spawn(FibonacciProducer.create(producerController), "producer");
|
||||
|
||||
consumerController.tell(
|
||||
new ConsumerController.RegisterToProducerController<>(producerController));
|
||||
// #connect
|
||||
|
||||
return Behaviors.empty();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
ActorSystem.create(Guardian.create(), "FibonacciExample");
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,445 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package jdocs.delivery;
|
||||
|
||||
// #imports
|
||||
import akka.Done;
|
||||
import akka.actor.Address;
|
||||
import akka.actor.typed.ActorRef;
|
||||
import akka.actor.typed.Behavior;
|
||||
import akka.actor.typed.delivery.ConsumerController;
|
||||
import akka.actor.typed.javadsl.AbstractBehavior;
|
||||
import akka.actor.typed.javadsl.ActorContext;
|
||||
import akka.actor.typed.javadsl.Behaviors;
|
||||
import akka.actor.typed.javadsl.Receive;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.CompletionStage;
|
||||
|
||||
// #imports
|
||||
|
||||
// #producer
|
||||
import akka.cluster.sharding.typed.delivery.ShardingProducerController;
|
||||
|
||||
// #producer
|
||||
|
||||
// #init
|
||||
import akka.cluster.sharding.typed.delivery.ShardingConsumerController;
|
||||
import akka.cluster.sharding.typed.ShardingEnvelope;
|
||||
import akka.cluster.sharding.typed.javadsl.ClusterSharding;
|
||||
import akka.cluster.sharding.typed.javadsl.Entity;
|
||||
import akka.cluster.sharding.typed.javadsl.EntityTypeKey;
|
||||
import akka.cluster.typed.Cluster;
|
||||
import akka.actor.typed.ActorSystem;
|
||||
|
||||
// #init
|
||||
|
||||
interface ShardingDocExample {
|
||||
|
||||
// #consumer
|
||||
|
||||
interface DB {
|
||||
CompletionStage<Done> save(String id, TodoList.State state);
|
||||
|
||||
CompletionStage<TodoList.State> load(String id);
|
||||
}
|
||||
|
||||
public class TodoList {
|
||||
interface Command {}
|
||||
|
||||
public static class AddTask implements Command {
|
||||
public final String item;
|
||||
|
||||
public AddTask(String item) {
|
||||
this.item = item;
|
||||
}
|
||||
}
|
||||
|
||||
public static class CompleteTask implements Command {
|
||||
public final String item;
|
||||
|
||||
public CompleteTask(String item) {
|
||||
this.item = item;
|
||||
}
|
||||
}
|
||||
|
||||
private static class InitialState implements Command {
|
||||
final State state;
|
||||
|
||||
private InitialState(State state) {
|
||||
this.state = state;
|
||||
}
|
||||
}
|
||||
|
||||
private static class SaveSuccess implements Command {
|
||||
final ActorRef<ConsumerController.Confirmed> confirmTo;
|
||||
|
||||
private SaveSuccess(ActorRef<ConsumerController.Confirmed> confirmTo) {
|
||||
this.confirmTo = confirmTo;
|
||||
}
|
||||
}
|
||||
|
||||
private static class DBError implements Command {
|
||||
final Exception cause;
|
||||
|
||||
private DBError(Throwable cause) {
|
||||
if (cause instanceof Exception) this.cause = (Exception) cause;
|
||||
else this.cause = new RuntimeException(cause.getMessage(), cause);
|
||||
}
|
||||
}
|
||||
|
||||
private static class CommandDelivery implements Command {
|
||||
final Command command;
|
||||
final ActorRef<ConsumerController.Confirmed> confirmTo;
|
||||
|
||||
private CommandDelivery(Command command, ActorRef<ConsumerController.Confirmed> confirmTo) {
|
||||
this.command = command;
|
||||
this.confirmTo = confirmTo;
|
||||
}
|
||||
}
|
||||
|
||||
public static class State {
|
||||
public final List<String> tasks;
|
||||
|
||||
public State(List<String> tasks) {
|
||||
this.tasks = Collections.unmodifiableList(tasks);
|
||||
}
|
||||
|
||||
public State add(String task) {
|
||||
ArrayList<String> copy = new ArrayList<>(tasks);
|
||||
copy.add(task);
|
||||
return new State(copy);
|
||||
}
|
||||
|
||||
public State remove(String task) {
|
||||
ArrayList<String> copy = new ArrayList<>(tasks);
|
||||
copy.remove(task);
|
||||
return new State(copy);
|
||||
}
|
||||
}
|
||||
|
||||
public static Behavior<Command> create(
|
||||
String id, DB db, ActorRef<ConsumerController.Start<Command>> consumerController) {
|
||||
return Init.create(id, db, consumerController);
|
||||
}
|
||||
|
||||
private static Behavior<Command> onDBError(DBError error) throws Exception {
|
||||
throw error.cause;
|
||||
}
|
||||
|
||||
static class Init extends AbstractBehavior<Command> {
|
||||
|
||||
private final String id;
|
||||
private final DB db;
|
||||
private final ActorRef<ConsumerController.Start<Command>> consumerController;
|
||||
|
||||
private Init(
|
||||
ActorContext<Command> context,
|
||||
String id,
|
||||
DB db,
|
||||
ActorRef<ConsumerController.Start<Command>> consumerController) {
|
||||
super(context);
|
||||
this.id = id;
|
||||
this.db = db;
|
||||
this.consumerController = consumerController;
|
||||
}
|
||||
|
||||
static Behavior<Command> create(
|
||||
String id, DB db, ActorRef<ConsumerController.Start<Command>> consumerController) {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
context.pipeToSelf(
|
||||
db.load(id),
|
||||
(state, exc) -> {
|
||||
if (exc == null) return new InitialState(state);
|
||||
else return new DBError(exc);
|
||||
});
|
||||
|
||||
return new Init(context, id, db, consumerController);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Receive<Command> createReceive() {
|
||||
return newReceiveBuilder()
|
||||
.onMessage(InitialState.class, this::onInitialState)
|
||||
.onMessage(DBError.class, TodoList::onDBError)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onInitialState(InitialState initial) {
|
||||
ActorRef<ConsumerController.Delivery<Command>> deliveryAdapter =
|
||||
getContext()
|
||||
.messageAdapter(
|
||||
ConsumerController.deliveryClass(),
|
||||
d -> new CommandDelivery(d.message(), d.confirmTo()));
|
||||
consumerController.tell(new ConsumerController.Start<>(deliveryAdapter));
|
||||
|
||||
return Active.create(id, db, initial.state);
|
||||
}
|
||||
}
|
||||
|
||||
static class Active extends AbstractBehavior<Command> {
|
||||
|
||||
private final String id;
|
||||
private final DB db;
|
||||
private State state;
|
||||
|
||||
private Active(ActorContext<Command> context, String id, DB db, State state) {
|
||||
super(context);
|
||||
this.id = id;
|
||||
this.db = db;
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
static Behavior<Command> create(String id, DB db, State state) {
|
||||
return Behaviors.setup(context -> new Active(context, id, db, state));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Receive<Command> createReceive() {
|
||||
return newReceiveBuilder()
|
||||
.onMessage(CommandDelivery.class, this::onDelivery)
|
||||
.onMessage(SaveSuccess.class, this::onSaveSuccess)
|
||||
.onMessage(DBError.class, TodoList::onDBError)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onDelivery(CommandDelivery delivery) {
|
||||
if (delivery.command instanceof AddTask) {
|
||||
AddTask addTask = (AddTask) delivery.command;
|
||||
state = state.add(addTask.item);
|
||||
save(state, delivery.confirmTo);
|
||||
return this;
|
||||
} else if (delivery.command instanceof CompleteTask) {
|
||||
CompleteTask completeTask = (CompleteTask) delivery.command;
|
||||
state = state.remove(completeTask.item);
|
||||
save(state, delivery.confirmTo);
|
||||
return this;
|
||||
} else {
|
||||
return Behaviors.unhandled();
|
||||
}
|
||||
}
|
||||
|
||||
private void save(State newState, ActorRef<ConsumerController.Confirmed> confirmTo) {
|
||||
getContext()
|
||||
.pipeToSelf(
|
||||
db.save(id, newState),
|
||||
(state, exc) -> {
|
||||
if (exc == null) return new SaveSuccess(confirmTo);
|
||||
else return new DBError(exc);
|
||||
});
|
||||
}
|
||||
|
||||
private Behavior<Command> onSaveSuccess(SaveSuccess success) {
|
||||
success.confirmTo.tell(ConsumerController.confirmed());
|
||||
return this;
|
||||
}
|
||||
}
|
||||
}
|
||||
// #consumer
|
||||
|
||||
// #producer
|
||||
public class TodoService {
|
||||
|
||||
interface Command {}
|
||||
|
||||
public static class UpdateTodo implements Command {
|
||||
public final String listId;
|
||||
public final String item;
|
||||
public final boolean completed;
|
||||
public final ActorRef<Response> replyTo;
|
||||
|
||||
public UpdateTodo(String listId, String item, boolean completed, ActorRef<Response> replyTo) {
|
||||
this.listId = listId;
|
||||
this.item = item;
|
||||
this.completed = completed;
|
||||
this.replyTo = replyTo;
|
||||
}
|
||||
}
|
||||
|
||||
public enum Response {
|
||||
ACCEPTED,
|
||||
REJECTED,
|
||||
MAYBE_ACCEPTED
|
||||
}
|
||||
|
||||
private static class Confirmed implements Command {
|
||||
final ActorRef<Response> originalReplyTo;
|
||||
|
||||
private Confirmed(ActorRef<Response> originalReplyTo) {
|
||||
this.originalReplyTo = originalReplyTo;
|
||||
}
|
||||
}
|
||||
|
||||
private static class TimedOut implements Command {
|
||||
final ActorRef<Response> originalReplyTo;
|
||||
|
||||
private TimedOut(ActorRef<Response> originalReplyTo) {
|
||||
this.originalReplyTo = originalReplyTo;
|
||||
}
|
||||
}
|
||||
|
||||
private static class WrappedRequestNext implements Command {
|
||||
final ShardingProducerController.RequestNext<TodoList.Command> next;
|
||||
|
||||
private WrappedRequestNext(ShardingProducerController.RequestNext<TodoList.Command> next) {
|
||||
this.next = next;
|
||||
}
|
||||
}
|
||||
|
||||
public static Behavior<Command> create(
|
||||
ActorRef<ShardingProducerController.Command<TodoList.Command>> producerController) {
|
||||
return Init.create(producerController);
|
||||
}
|
||||
|
||||
static class Init extends AbstractBehavior<TodoService.Command> {
|
||||
|
||||
static Behavior<Command> create(
|
||||
ActorRef<ShardingProducerController.Command<TodoList.Command>> producerController) {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
ActorRef<ShardingProducerController.RequestNext<TodoList.Command>>
|
||||
requestNextAdapter =
|
||||
context.messageAdapter(
|
||||
ShardingProducerController.requestNextClass(), WrappedRequestNext::new);
|
||||
producerController.tell(new ShardingProducerController.Start<>(requestNextAdapter));
|
||||
|
||||
return new Init(context);
|
||||
});
|
||||
}
|
||||
|
||||
private Init(ActorContext<Command> context) {
|
||||
super(context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Receive<Command> createReceive() {
|
||||
return newReceiveBuilder()
|
||||
.onMessage(WrappedRequestNext.class, w -> Active.create(w.next))
|
||||
.onMessage(
|
||||
UpdateTodo.class,
|
||||
command -> {
|
||||
// not hooked up with shardingProducerController yet
|
||||
command.replyTo.tell(Response.REJECTED);
|
||||
return this;
|
||||
})
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
static class Active extends AbstractBehavior<TodoService.Command> {
|
||||
|
||||
private ShardingProducerController.RequestNext<TodoList.Command> requestNext;
|
||||
|
||||
static Behavior<Command> create(
|
||||
ShardingProducerController.RequestNext<TodoList.Command> requestNext) {
|
||||
return Behaviors.setup(context -> new Active(context, requestNext));
|
||||
}
|
||||
|
||||
private Active(
|
||||
ActorContext<Command> context,
|
||||
ShardingProducerController.RequestNext<TodoList.Command> requestNext) {
|
||||
super(context);
|
||||
this.requestNext = requestNext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Receive<Command> createReceive() {
|
||||
return newReceiveBuilder()
|
||||
.onMessage(WrappedRequestNext.class, this::onRequestNext)
|
||||
.onMessage(UpdateTodo.class, this::onUpdateTodo)
|
||||
.onMessage(Confirmed.class, this::onConfirmed)
|
||||
.onMessage(TimedOut.class, this::onTimedOut)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onRequestNext(WrappedRequestNext w) {
|
||||
requestNext = w.next;
|
||||
return this;
|
||||
}
|
||||
|
||||
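// Rejects when too many messages are already buffered for this list; otherwise sends with
// MessageWithConfirmation via ask and maps the reply or timeout to Confirmed or TimedOut.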
private Behavior<Command> onUpdateTodo(UpdateTodo command) {
|
||||
Integer buffered = requestNext.getBufferedForEntitiesWithoutDemand().get(command.listId);
|
||||
if (buffered != null && buffered >= 100) {
|
||||
command.replyTo.tell(Response.REJECTED);
|
||||
} else {
|
||||
TodoList.Command requestMsg;
|
||||
if (command.completed) requestMsg = new TodoList.CompleteTask(command.item);
|
||||
else requestMsg = new TodoList.AddTask(command.item);
|
||||
getContext()
|
||||
.ask(
|
||||
Done.class,
|
||||
requestNext.askNextTo(),
|
||||
Duration.ofSeconds(5),
|
||||
askReplyTo ->
|
||||
new ShardingProducerController.MessageWithConfirmation<>(
|
||||
command.listId, requestMsg, askReplyTo),
|
||||
(done, exc) -> {
|
||||
if (exc == null) return new Confirmed(command.replyTo);
|
||||
else return new TimedOut(command.replyTo);
|
||||
});
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
private Behavior<Command> onConfirmed(Confirmed confirmed) {
|
||||
confirmed.originalReplyTo.tell(Response.ACCEPTED);
|
||||
return this;
|
||||
}
|
||||
|
||||
private Behavior<Command> onTimedOut(TimedOut timedOut) {
|
||||
timedOut.originalReplyTo.tell(Response.MAYBE_ACCEPTED);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
}
|
||||
// #producer
|
||||
|
||||
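// Wiring sketch: sharding entities are started with ShardingConsumerController wrapping TodoList,
// and one ShardingProducerController per node (producerId based on the node address) feeds the region.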
static void illustrateInit() {
|
||||
Behaviors.setup(
|
||||
context -> {
|
||||
// #init
|
||||
final DB db = theDatabaseImplementation();
|
||||
|
||||
ActorSystem<Void> system = context.getSystem();
|
||||
|
||||
EntityTypeKey<ConsumerController.SequencedMessage<TodoList.Command>> entityTypeKey =
|
||||
EntityTypeKey.create(ShardingConsumerController.entityTypeKeyClass(), "todo");
|
||||
|
||||
ActorRef<ShardingEnvelope<ConsumerController.SequencedMessage<TodoList.Command>>> region =
|
||||
ClusterSharding.get(system)
|
||||
.init(
|
||||
Entity.of(
|
||||
entityTypeKey,
|
||||
entityContext ->
|
||||
ShardingConsumerController.create(
|
||||
start ->
|
||||
TodoList.create(entityContext.getEntityId(), db, start))));
|
||||
|
||||
Address selfAddress = Cluster.get(system).selfMember().address();
|
||||
String producerId = "todo-producer-" + selfAddress.host() + ":" + selfAddress.port();
|
||||
|
||||
ActorRef<ShardingProducerController.Command<TodoList.Command>> producerController =
|
||||
context.spawn(
|
||||
ShardingProducerController.create(
|
||||
TodoList.Command.class, producerId, region, Optional.empty()),
|
||||
"producerController");
|
||||
|
||||
context.spawn(TodoService.create(producerController), "producer");
|
||||
// #init
|
||||
|
||||
return Behaviors.empty();
|
||||
});
|
||||
}
|
||||
|
||||
static DB theDatabaseImplementation() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,364 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package jdocs.delivery;
|
||||
|
||||
// #imports
|
||||
import akka.actor.typed.ActorRef;
|
||||
import akka.actor.typed.Behavior;
|
||||
import akka.actor.typed.delivery.ConsumerController;
|
||||
import akka.actor.typed.delivery.DurableProducerQueue;
|
||||
import akka.actor.typed.javadsl.ActorContext;
|
||||
import akka.actor.typed.javadsl.Behaviors;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
|
||||
// #imports
|
||||
|
||||
// #producer
|
||||
import akka.actor.typed.delivery.WorkPullingProducerController;
|
||||
import akka.Done;
|
||||
|
||||
// #producer
|
||||
|
||||
// #durable-queue
|
||||
import akka.persistence.typed.PersistenceId;
|
||||
import akka.persistence.typed.delivery.EventSourcedProducerQueue;
|
||||
|
||||
// #durable-queue
|
||||
|
||||
import akka.actor.typed.javadsl.StashBuffer;
|
||||
import akka.actor.typed.receptionist.ServiceKey;
|
||||
|
||||
interface WorkPullingDocExample {
|
||||
|
||||
// #consumer
|
||||
public class ImageConverter {
|
||||
interface Command {}
|
||||
|
||||
public static class ConversionJob {
|
||||
public final UUID resultId;
|
||||
public final String fromFormat;
|
||||
public final String toFormat;
|
||||
public final byte[] image;
|
||||
|
||||
public ConversionJob(UUID resultId, String fromFormat, String toFormat, byte[] image) {
|
||||
this.resultId = resultId;
|
||||
this.fromFormat = fromFormat;
|
||||
this.toFormat = toFormat;
|
||||
this.image = image;
|
||||
}
|
||||
}
|
||||
|
||||
private static class WrappedDelivery implements Command {
|
||||
final ConsumerController.Delivery<ConversionJob> delivery;
|
||||
|
||||
private WrappedDelivery(ConsumerController.Delivery<ConversionJob> delivery) {
|
||||
this.delivery = delivery;
|
||||
}
|
||||
}
|
||||
|
||||
public static ServiceKey<ConsumerController.Command<ConversionJob>> serviceKey =
|
||||
ServiceKey.create(ConsumerController.serviceKeyClass(), "ImageConverter");
|
||||
|
||||
public static Behavior<Command> create() {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
ActorRef<ConsumerController.Delivery<ConversionJob>> deliveryAdapter =
|
||||
context.messageAdapter(ConsumerController.deliveryClass(), WrappedDelivery::new);
|
||||
ActorRef<ConsumerController.Command<ConversionJob>> consumerController =
|
||||
context.spawn(ConsumerController.create(serviceKey), "consumerController");
|
||||
consumerController.tell(new ConsumerController.Start<>(deliveryAdapter));
|
||||
|
||||
return Behaviors.receive(Command.class)
|
||||
.onMessage(WrappedDelivery.class, ImageConverter::onDelivery)
|
||||
.build();
|
||||
});
|
||||
}
|
||||
|
||||
private static Behavior<Command> onDelivery(WrappedDelivery w) {
|
||||
byte[] image = w.delivery.message().image;
|
||||
String fromFormat = w.delivery.message().fromFormat;
|
||||
String toFormat = w.delivery.message().toFormat;
|
||||
// convert image...
|
||||
// store result with resultId key for later retrieval
|
||||
|
||||
// and when completed confirm
|
||||
w.delivery.confirmTo().tell(ConsumerController.confirmed());
|
||||
|
||||
return Behaviors.same();
|
||||
}
|
||||
}
|
||||
// #consumer
|
||||
|
||||
// #producer
|
||||
public class ImageWorkManager {
|
||||
|
||||
interface Command {}
|
||||
|
||||
public static class Convert implements Command {
|
||||
public final String fromFormat;
|
||||
public final String toFormat;
|
||||
public final byte[] image;
|
||||
|
||||
public Convert(String fromFormat, String toFormat, byte[] image) {
|
||||
this.fromFormat = fromFormat;
|
||||
this.toFormat = toFormat;
|
||||
this.image = image;
|
||||
}
|
||||
}
|
||||
|
||||
public static class GetResult implements Command {
|
||||
public final UUID resultId;
|
||||
public final ActorRef<Optional<byte[]>> replyTo;
|
||||
|
||||
public GetResult(UUID resultId, ActorRef<Optional<byte[]>> replyTo) {
|
||||
this.resultId = resultId;
|
||||
this.replyTo = replyTo;
|
||||
}
|
||||
}
|
||||
|
||||
private static class WrappedRequestNext implements Command {
|
||||
final WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob> next;
|
||||
|
||||
private WrappedRequestNext(
|
||||
WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob> next) {
|
||||
this.next = next;
|
||||
}
|
||||
}
|
||||
|
||||
// #producer
|
||||
// #ask
|
||||
public static class ConvertRequest implements Command {
|
||||
public final String fromFormat;
|
||||
public final String toFormat;
|
||||
public final byte[] image;
|
||||
public final ActorRef<ConvertResponse> replyTo;
|
||||
|
||||
public ConvertRequest(
|
||||
String fromFormat, String toFormat, byte[] image, ActorRef<ConvertResponse> replyTo) {
|
||||
this.fromFormat = fromFormat;
|
||||
this.toFormat = toFormat;
|
||||
this.image = image;
|
||||
this.replyTo = replyTo;
|
||||
}
|
||||
}
|
||||
|
||||
interface ConvertResponse {}
|
||||
|
||||
public static class ConvertAccepted implements ConvertResponse {
|
||||
public final UUID resultId;
|
||||
|
||||
public ConvertAccepted(UUID resultId) {
|
||||
this.resultId = resultId;
|
||||
}
|
||||
}
|
||||
|
||||
enum ConvertRejected implements ConvertResponse {
|
||||
INSTANCE
|
||||
}
|
||||
|
||||
public static class ConvertTimedOut implements ConvertResponse {
|
||||
public final UUID resultId;
|
||||
|
||||
public ConvertTimedOut(UUID resultId) {
|
||||
this.resultId = resultId;
|
||||
}
|
||||
}
|
||||
|
||||
private static class AskReply implements Command {
|
||||
final UUID resultId;
|
||||
final ActorRef<ConvertResponse> originalReplyTo;
|
||||
final boolean timeout;
|
||||
|
||||
private AskReply(UUID resultId, ActorRef<ConvertResponse> originalReplyTo, boolean timeout) {
|
||||
this.resultId = resultId;
|
||||
this.originalReplyTo = originalReplyTo;
|
||||
this.timeout = timeout;
|
||||
}
|
||||
}
|
||||
|
||||
// #ask
|
||||
// #producer
|
||||
|
||||
private final ActorContext<Command> context;
|
||||
private final StashBuffer<Command> stashBuffer;
|
||||
|
||||
private ImageWorkManager(ActorContext<Command> context, StashBuffer<Command> stashBuffer) {
|
||||
this.context = context;
|
||||
this.stashBuffer = stashBuffer;
|
||||
}
|
||||
|
||||
public static Behavior<Command> create() {
|
||||
return Behaviors.setup(
|
||||
context -> {
|
||||
ActorRef<WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob>>
|
||||
requestNextAdapter =
|
||||
context.messageAdapter(
|
||||
WorkPullingProducerController.requestNextClass(), WrappedRequestNext::new);
|
||||
ActorRef<WorkPullingProducerController.Command<ImageConverter.ConversionJob>>
|
||||
producerController =
|
||||
context.spawn(
|
||||
WorkPullingProducerController.create(
|
||||
ImageConverter.ConversionJob.class,
|
||||
"workManager",
|
||||
ImageConverter.serviceKey,
|
||||
Optional.empty()),
|
||||
"producerController");
|
||||
// #producer
|
||||
// #durable-queue
|
||||
Behavior<DurableProducerQueue.Command<ImageConverter.ConversionJob>> durableQueue =
|
||||
EventSourcedProducerQueue.create(PersistenceId.ofUniqueId("ImageWorkManager"));
|
||||
ActorRef<WorkPullingProducerController.Command<ImageConverter.ConversionJob>>
|
||||
durableProducerController =
|
||||
context.spawn(
|
||||
WorkPullingProducerController.create(
|
||||
ImageConverter.ConversionJob.class,
|
||||
"workManager",
|
||||
ImageConverter.serviceKey,
|
||||
Optional.of(durableQueue)),
|
||||
"producerController");
|
||||
// #durable-queue
|
||||
// #producer
|
||||
producerController.tell(new WorkPullingProducerController.Start<>(requestNextAdapter));
|
||||
|
||||
return Behaviors.withStash(
|
||||
1000, stashBuffer -> new ImageWorkManager(context, stashBuffer).waitForNext());
|
||||
});
|
||||
}
|
||||
|
||||
private Behavior<Command> waitForNext() {
|
||||
return Behaviors.receive(Command.class)
|
||||
.onMessage(WrappedRequestNext.class, this::onWrappedRequestNext)
|
||||
.onMessage(Convert.class, this::onConvertWait)
|
||||
.onMessage(GetResult.class, this::onGetResult)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onWrappedRequestNext(WrappedRequestNext w) {
|
||||
return stashBuffer.unstashAll(active(w.next));
|
||||
}
|
||||
|
||||
private Behavior<Command> onConvertWait(Convert convert) {
|
||||
if (stashBuffer.isFull()) {
|
||||
context.getLog().warn("Too many Convert requests.");
|
||||
return Behaviors.same();
|
||||
} else {
|
||||
stashBuffer.stash(convert);
|
||||
return Behaviors.same();
|
||||
}
|
||||
}
|
||||
|
||||
private Behavior<Command> onGetResult(GetResult get) {
|
||||
// TODO retrieve the stored result and reply
|
||||
return Behaviors.same();
|
||||
}
|
||||
|
||||
private Behavior<Command> active(
|
||||
WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob> next) {
|
||||
return Behaviors.receive(Command.class)
|
||||
.onMessage(Convert.class, c -> onConvert(c, next))
|
||||
.onMessage(GetResult.class, this::onGetResult)
|
||||
.onMessage(WrappedRequestNext.class, this::onUnexpectedWrappedRequestNext)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onUnexpectedWrappedRequestNext(WrappedRequestNext w) {
|
||||
throw new IllegalStateException("Unexpected RequestNext");
|
||||
}
|
||||
|
||||
private Behavior<Command> onConvert(
|
||||
Convert convert,
|
||||
WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob> next) {
|
||||
UUID resultId = UUID.randomUUID();
|
||||
next.sendNextTo()
|
||||
.tell(
|
||||
new ImageConverter.ConversionJob(
|
||||
resultId, convert.fromFormat, convert.toFormat, convert.image));
|
||||
return waitForNext();
|
||||
}
|
||||
// #producer
|
||||
|
||||
Object askScope =
|
||||
new Object() {
|
||||
// #ask
|
||||
private Behavior<Command> waitForNext() {
|
||||
return Behaviors.receive(Command.class)
|
||||
.onMessage(WrappedRequestNext.class, this::onWrappedRequestNext)
|
||||
.onMessage(ConvertRequest.class, this::onConvertRequestWait)
|
||||
.onMessage(AskReply.class, this::onAskReply)
|
||||
.onMessage(GetResult.class, this::onGetResult)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onConvertRequestWait(ConvertRequest convert) {
|
||||
if (stashBuffer.isFull()) {
|
||||
convert.replyTo.tell(ConvertRejected.INSTANCE);
|
||||
return Behaviors.same();
|
||||
} else {
|
||||
stashBuffer.stash(convert);
|
||||
return Behaviors.same();
|
||||
}
|
||||
}
|
||||
|
||||
private Behavior<Command> onAskReply(AskReply reply) {
|
||||
if (reply.timeout) reply.originalReplyTo.tell(new ConvertTimedOut(reply.resultId));
|
||||
else reply.originalReplyTo.tell(new ConvertAccepted(reply.resultId));
|
||||
return Behaviors.same();
|
||||
}
|
||||
|
||||
private Behavior<Command> onWrappedRequestNext(WrappedRequestNext w) {
|
||||
return stashBuffer.unstashAll(active(w.next));
|
||||
}
|
||||
|
||||
private Behavior<Command> onGetResult(GetResult get) {
|
||||
// TODO retrieve the stored result and reply
|
||||
return Behaviors.same();
|
||||
}
|
||||
|
||||
private Behavior<Command> active(
|
||||
WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob> next) {
|
||||
return Behaviors.receive(Command.class)
|
||||
.onMessage(ConvertRequest.class, c -> onConvertRequest(c, next))
|
||||
.onMessage(AskReply.class, this::onAskReply)
|
||||
.onMessage(GetResult.class, this::onGetResult)
|
||||
.onMessage(WrappedRequestNext.class, this::onUnexpectedWrappedRequestNext)
|
||||
.build();
|
||||
}
|
||||
|
||||
private Behavior<Command> onConvertRequest(
|
||||
ConvertRequest convert,
|
||||
WorkPullingProducerController.RequestNext<ImageConverter.ConversionJob> next) {
|
||||
UUID resultId = UUID.randomUUID();
|
||||
|
||||
context.ask(
|
||||
Done.class,
|
||||
next.askNextTo(),
|
||||
Duration.ofSeconds(5),
|
||||
askReplyTo ->
|
||||
new WorkPullingProducerController.MessageWithConfirmation<>(
|
||||
new ImageConverter.ConversionJob(
|
||||
resultId, convert.fromFormat, convert.toFormat, convert.image),
|
||||
askReplyTo),
|
||||
(done, exc) -> {
|
||||
if (exc == null) return new AskReply(resultId, convert.replyTo, false);
|
||||
else return new AskReply(resultId, convert.replyTo, true);
|
||||
});
|
||||
|
||||
return waitForNext();
|
||||
}
|
||||
|
||||
private Behavior<Command> onUnexpectedWrappedRequestNext(WrappedRequestNext w) {
|
||||
throw new IllegalStateException("Unexpected RequestNext");
|
||||
}
|
||||
|
||||
// #ask
|
||||
};
|
||||
// #producer
|
||||
}
|
||||
// #producer
|
||||
|
||||
}
|
||||
|
|
@ -25,7 +25,7 @@
|
|||
<appender-ref ref="STDOUT"/>
|
||||
</logger>
|
||||
|
||||
<root level="DEBUG">
|
||||
<root level="TRACE">
|
||||
<appender-ref ref="CapturingAppender"/>
|
||||
</root>
|
||||
</configuration>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding.typed.delivery
|
||||
|
||||
import java.util.UUID
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.ConsumerController.SequencedMessage
|
||||
import akka.actor.typed.delivery.DurableProducerQueue
|
||||
import akka.actor.typed.delivery.TestConsumer
|
||||
import akka.actor.typed.eventstream.EventStream
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.cluster.sharding.typed.ShardingEnvelope
|
||||
import akka.cluster.sharding.typed.scaladsl.ClusterSharding
|
||||
import akka.cluster.sharding.typed.scaladsl.Entity
|
||||
import akka.cluster.sharding.typed.scaladsl.EntityTypeKey
|
||||
import akka.cluster.typed.Cluster
|
||||
import akka.cluster.typed.Join
|
||||
import akka.persistence.journal.inmem.InmemJournal
|
||||
import akka.persistence.typed.PersistenceId
|
||||
import akka.persistence.typed.delivery.EventSourcedProducerQueue
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
object DurableShardingSpec {
|
||||
def conf: Config =
|
||||
ConfigFactory.parseString(s"""
|
||||
akka.actor.provider = cluster
|
||||
akka.remote.classic.netty.tcp.port = 0
|
||||
akka.remote.artery.canonical.port = 0
|
||||
akka.persistence.journal.plugin = "akka.persistence.journal.inmem"
|
||||
akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
|
||||
akka.persistence.snapshot-store.local.dir = "target/DurableShardingSpec-${UUID.randomUUID().toString}"
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
}
|
||||
|
||||
class DurableShardingSpec
|
||||
extends ScalaTestWithActorTestKit(DurableShardingSpec.conf)
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
private def producerId: String = s"p-$idCount"
|
||||
|
||||
private val journalOperations = createTestProbe[InmemJournal.Operation]()
|
||||
system.eventStream ! EventStream.Subscribe(journalOperations.ref)
|
||||
|
||||
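// Test consumer: adapts Delivery to JobDelivery and forwards it to the probe; the test itself
// sends Confirmed via the delivery's confirmTo.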
private def consumerBehavior(
|
||||
c: ActorRef[ConsumerController.Start[TestConsumer.Job]],
|
||||
consumerProbe: ActorRef[TestConsumer.JobDelivery]): Behavior[TestConsumer.Command] =
|
||||
Behaviors.setup[TestConsumer.Command] { context =>
|
||||
val deliveryAdapter = context.messageAdapter[ConsumerController.Delivery[TestConsumer.Job]] { d =>
|
||||
TestConsumer.JobDelivery(d.message, d.confirmTo, d.producerId, d.seqNr)
|
||||
}
|
||||
c ! ConsumerController.Start(deliveryAdapter)
|
||||
Behaviors.receiveMessagePartial {
|
||||
case jobDelivery: TestConsumer.JobDelivery =>
|
||||
consumerProbe.ref ! jobDelivery
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
"ReliableDelivery with sharding and durable queue" must {
|
||||
|
||||
"join cluster" in {
|
||||
Cluster(system).manager ! Join(Cluster(system).selfMember.address)
|
||||
}
|
||||
|
||||
"load initial state and resend unconfirmed" in {
|
||||
nextId()
|
||||
val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
|
||||
val consumerProbe = createTestProbe[TestConsumer.JobDelivery]()
|
||||
val sharding: ActorRef[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]] =
|
||||
ClusterSharding(system).init(Entity(typeKey)(_ =>
|
||||
ShardingConsumerController[TestConsumer.Job, TestConsumer.Command](c =>
|
||||
consumerBehavior(c, consumerProbe.ref))))
|
||||
|
||||
val shardingProducerController =
|
||||
spawn(
|
||||
ShardingProducerController[TestConsumer.Job](
|
||||
producerId,
|
||||
sharding,
|
||||
Some(EventSourcedProducerQueue[TestConsumer.Job](PersistenceId.ofUniqueId(producerId)))),
|
||||
s"shardingController-$idCount")
|
||||
val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)
|
||||
|
||||
(1 to 4).foreach { n =>
|
||||
producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job(s"msg-$n"))
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.getClass should ===(
|
||||
classOf[DurableProducerQueue.MessageSent[_]])
|
||||
}
|
||||
|
||||
journalOperations.expectNoMessage()
|
||||
|
||||
val delivery1 = consumerProbe.receiveMessage()
|
||||
delivery1.confirmTo ! ConsumerController.Confirmed
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.getClass should ===(
|
||||
classOf[DurableProducerQueue.Confirmed])
|
||||
|
||||
val delivery2 = consumerProbe.receiveMessage()
|
||||
delivery2.confirmTo ! ConsumerController.Confirmed
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.getClass should ===(
|
||||
classOf[DurableProducerQueue.Confirmed])
|
||||
|
||||
producerProbe.receiveMessage()
|
||||
|
||||
// let the initial messages reach the ShardingConsumerController before stopping ShardingProducerController
|
||||
val delivery3 = consumerProbe.receiveMessage()
|
||||
delivery3.msg should ===(TestConsumer.Job("msg-3"))
|
||||
delivery3.seqNr should ===(3)
|
||||
Thread.sleep(1000)
|
||||
|
||||
system.log.info("Stopping [{}]", shardingProducerController)
|
||||
testKit.stop(shardingProducerController)
|
||||
|
||||
val shardingProducerController2 =
|
||||
spawn(
|
||||
ShardingProducerController[TestConsumer.Job](
|
||||
producerId,
|
||||
sharding,
|
||||
Some(EventSourcedProducerQueue[TestConsumer.Job](PersistenceId.ofUniqueId(producerId)))),
|
||||
s"shardingController2-$idCount")
|
||||
shardingProducerController2 ! ShardingProducerController.Start(producerProbe.ref)
|
||||
|
||||
// delivery3 and delivery4 are still from the old shardingProducerController; they were queued in the ConsumerController
|
||||
delivery3.confirmTo ! ConsumerController.Confirmed
|
||||
// that confirmation goes to the old, stopped shardingProducerController and is therefore not stored
|
||||
journalOperations.expectNoMessage()
|
||||
|
||||
val delivery4 = consumerProbe.receiveMessage()
|
||||
delivery4.msg should ===(TestConsumer.Job("msg-4"))
|
||||
delivery4.seqNr should ===(4)
|
||||
delivery4.confirmTo ! ConsumerController.Confirmed
|
||||
// that confirmation goes to the old, stopped shardingProducerController and is therefore not stored
|
||||
journalOperations.expectNoMessage()
|
||||
|
||||
// now the unconfirmed are redelivered
|
||||
val redelivery3 = consumerProbe.receiveMessage()
|
||||
redelivery3.msg should ===(TestConsumer.Job("msg-3"))
|
||||
redelivery3.seqNr should ===(1) // new ProducerController, therefore starting at 1
|
||||
redelivery3.confirmTo ! ConsumerController.Confirmed
|
||||
val confirmed3 =
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.asInstanceOf[DurableProducerQueue.Confirmed]
|
||||
confirmed3.seqNr should ===(3)
|
||||
confirmed3.confirmationQualifier should ===("entity-1")
|
||||
|
||||
val redelivery4 = consumerProbe.receiveMessage()
|
||||
redelivery4.msg should ===(TestConsumer.Job("msg-4"))
|
||||
redelivery4.seqNr should ===(2)
|
||||
redelivery4.confirmTo ! ConsumerController.Confirmed
|
||||
val confirmed4 =
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.asInstanceOf[DurableProducerQueue.Confirmed]
|
||||
confirmed4.seqNr should ===(4)
|
||||
confirmed4.confirmationQualifier should ===("entity-1")
|
||||
|
||||
val next5 = producerProbe.receiveMessage()
|
||||
next5.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-5"))
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.getClass should ===(
|
||||
classOf[DurableProducerQueue.MessageSent[_]])
|
||||
|
||||
val delivery5 = consumerProbe.receiveMessage()
|
||||
delivery5.msg should ===(TestConsumer.Job("msg-5"))
|
||||
delivery5.seqNr should ===(3)
|
||||
delivery5.confirmTo ! ConsumerController.Confirmed
|
||||
val confirmed5 =
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.asInstanceOf[DurableProducerQueue.Confirmed]
|
||||
confirmed5.seqNr should ===(5)
|
||||
confirmed5.confirmationQualifier should ===("entity-1")
|
||||
|
||||
testKit.stop(shardingProducerController2)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation after storage" in {
|
||||
import ShardingProducerController.MessageWithConfirmation
|
||||
nextId()
|
||||
val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
|
||||
val consumerProbe = createTestProbe[TestConsumer.JobDelivery]()
|
||||
|
||||
val sharding: ActorRef[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]] =
|
||||
ClusterSharding(system).init(Entity(typeKey)(_ =>
|
||||
ShardingConsumerController[TestConsumer.Job, TestConsumer.Command](c =>
|
||||
consumerBehavior(c, consumerProbe.ref))))
|
||||
|
||||
val shardingProducerController =
|
||||
spawn(
|
||||
ShardingProducerController[TestConsumer.Job](
|
||||
producerId,
|
||||
sharding,
|
||||
Some(EventSourcedProducerQueue[TestConsumer.Job](PersistenceId.ofUniqueId(producerId)))),
|
||||
s"shardingController-$idCount")
|
||||
val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val replyProbe = createTestProbe[Done]()
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(
|
||||
"entity-1",
|
||||
TestConsumer.Job("msg-1"),
|
||||
replyProbe.ref)
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.getClass should ===(
|
||||
classOf[DurableProducerQueue.MessageSent[_]])
|
||||
replyProbe.expectMessage(Done)
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! MessageWithConfirmation(
|
||||
"entity-2",
|
||||
TestConsumer.Job("msg-2"),
|
||||
replyProbe.ref)
|
||||
journalOperations.expectMessageType[InmemJournal.Write].event.getClass should ===(
|
||||
classOf[DurableProducerQueue.MessageSent[_]])
|
||||
replyProbe.expectMessage(Done)
|
||||
|
||||
testKit.stop(shardingProducerController)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,518 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding.typed.delivery
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.Done
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.LoggingTestKit
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.delivery.ConsumerController
|
||||
import akka.actor.typed.delivery.ConsumerController.SequencedMessage
|
||||
import akka.actor.typed.delivery.TestConsumer
|
||||
import akka.actor.typed.delivery.internal.ProducerControllerImpl
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
import akka.cluster.typed.Cluster
|
||||
import akka.cluster.sharding.typed.ShardingEnvelope
|
||||
import akka.cluster.sharding.typed.scaladsl.ClusterSharding
|
||||
import akka.cluster.sharding.typed.scaladsl.Entity
|
||||
import akka.cluster.sharding.typed.scaladsl.EntityTypeKey
|
||||
import akka.cluster.typed.Join
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
object ReliableDeliveryShardingSpec {
|
||||
val config = ConfigFactory.parseString("""
|
||||
akka.actor.provider = cluster
|
||||
akka.remote.classic.netty.tcp.port = 0
|
||||
akka.remote.artery.canonical.port = 0
|
||||
akka.reliable-delivery.consumer-controller.flow-control-window = 20
|
||||
""")
|
||||
|
||||
object TestShardingProducer {
|
||||
|
||||
trait Command
|
||||
final case class RequestNext(sendToRef: ActorRef[ShardingEnvelope[TestConsumer.Job]]) extends Command
|
||||
|
||||
private final case object Tick extends Command
|
||||
|
||||
def apply(producerController: ActorRef[ShardingProducerController.Start[TestConsumer.Job]]): Behavior[Command] = {
|
||||
Behaviors.setup { context =>
|
||||
context.setLoggerName("TestShardingProducer")
|
||||
val requestNextAdapter: ActorRef[ShardingProducerController.RequestNext[TestConsumer.Job]] =
|
||||
context.messageAdapter(req => RequestNext(req.sendNextTo))
|
||||
producerController ! ShardingProducerController.Start(requestNextAdapter)
|
||||
|
||||
// simulate fast producer
|
||||
Behaviors.withTimers { timers =>
|
||||
timers.startTimerWithFixedDelay(Tick, Tick, 20.millis)
|
||||
idle(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
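// Alternates between idle (no demand) and active (demand available); each Tick while active sends
// one message, spread over three entityIds, and then waits for the next RequestNext.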
private def idle(n: Int): Behavior[Command] = {
|
||||
Behaviors.receiveMessage {
|
||||
case Tick => Behaviors.same
|
||||
case RequestNext(sendTo) => active(n + 1, sendTo)
|
||||
}
|
||||
}
|
||||
|
||||
private def active(n: Int, sendTo: ActorRef[ShardingEnvelope[TestConsumer.Job]]): Behavior[Command] = {
|
||||
Behaviors.receive { (ctx, msg) =>
|
||||
msg match {
|
||||
case Tick =>
|
||||
val msg = s"msg-$n"
|
||||
val entityId = s"entity-${n % 3}"
|
||||
ctx.log.info2("sent {} to {}", msg, entityId)
|
||||
sendTo ! ShardingEnvelope(entityId, TestConsumer.Job(msg))
|
||||
idle(n)
|
||||
|
||||
case RequestNext(_) =>
|
||||
// already active
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class ReliableDeliveryShardingSpec
|
||||
extends ScalaTestWithActorTestKit(ReliableDeliveryShardingSpec.config)
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
import ReliableDeliveryShardingSpec._
|
||||
import TestConsumer.defaultConsumerDelay
|
||||
|
||||
private var idCount = 0
|
||||
private def nextId(): Int = {
|
||||
idCount += 1
|
||||
idCount
|
||||
}
|
||||
|
||||
private def producerId: String = s"p-$idCount"
|
||||
|
||||
"ReliableDelivery with sharding" must {
|
||||
"join cluster" in {
|
||||
Cluster(system).manager ! Join(Cluster(system).selfMember.address)
|
||||
}
|
||||
|
||||
"illustrate sharding usage" in {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
|
||||
val sharding: ActorRef[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]] =
|
||||
ClusterSharding(system).init(Entity(typeKey)(_ =>
|
||||
ShardingConsumerController[TestConsumer.Job, TestConsumer.Command](c =>
|
||||
TestConsumer(defaultConsumerDelay, 42, consumerEndProbe.ref, c))))
|
||||
|
||||
val shardingProducerController =
|
||||
spawn(ShardingProducerController[TestConsumer.Job](producerId, sharding, None), s"shardingController-$idCount")
|
||||
val producer = spawn(TestShardingProducer(shardingProducerController), name = s"shardingProducer-$idCount")
|
||||
|
||||
// expecting 3 end messages, one for each entity: "entity-0", "entity-1", "entity-2"
|
||||
consumerEndProbe.receiveMessages(3, 5.seconds)
|
||||
|
||||
testKit.stop(producer)
|
||||
testKit.stop(shardingProducerController)
|
||||
}
|
||||
|
||||
"illustrate sharding usage with several producers" in {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
|
||||
val sharding: ActorRef[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]] =
|
||||
ClusterSharding(system).init(Entity(typeKey)(_ =>
|
||||
ShardingConsumerController[TestConsumer.Job, TestConsumer.Command](c =>
|
||||
TestConsumer(defaultConsumerDelay, 42, consumerEndProbe.ref, c))))
|
||||
|
||||
val shardingController1 =
|
||||
spawn(
|
||||
ShardingProducerController[TestConsumer.Job](
|
||||
s"p1-$idCount", // note different producerId
|
||||
sharding,
|
||||
None),
|
||||
s"shardingController1-$idCount")
|
||||
val producer1 = spawn(TestShardingProducer(shardingController1), name = s"shardingProducer1-$idCount")
|
||||
|
||||
val shardingController2 =
|
||||
spawn(
|
||||
ShardingProducerController[TestConsumer.Job](
|
||||
s"p2-$idCount", // note different producerId
|
||||
sharding,
|
||||
None),
|
||||
s"shardingController2-$idCount")
|
||||
val producer2 = spawn(TestShardingProducer(shardingController2), name = s"shardingProducer2-$idCount")
|
||||
|
||||
// expecting 3 end messages, one for each entity: "entity-0", "entity-1", "entity-2"
|
||||
val endMessages = consumerEndProbe.receiveMessages(3, 5.seconds)
|
||||
// verify that they received messages from both producers
|
||||
endMessages.flatMap(_.producerIds).toSet should ===(
|
||||
Set(
|
||||
s"p1-$idCount-entity-0",
|
||||
s"p1-$idCount-entity-1",
|
||||
s"p1-$idCount-entity-2",
|
||||
s"p2-$idCount-entity-0",
|
||||
s"p2-$idCount-entity-1",
|
||||
s"p2-$idCount-entity-2"))
|
||||
|
||||
testKit.stop(producer1)
|
||||
testKit.stop(producer2)
|
||||
testKit.stop(shardingController1)
|
||||
testKit.stop(shardingController2)
|
||||
}
|
||||
|
||||
"reply to MessageWithConfirmation" in {
|
||||
nextId()
|
||||
val consumerEndProbe = createTestProbe[TestConsumer.CollectedProducerIds]()
|
||||
val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
|
||||
val sharding: ActorRef[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]] =
|
||||
ClusterSharding(system).init(Entity(typeKey)(_ =>
|
||||
ShardingConsumerController[TestConsumer.Job, TestConsumer.Command](c =>
|
||||
TestConsumer(defaultConsumerDelay, 3, consumerEndProbe.ref, c))))
|
||||
|
||||
val shardingProducerController =
|
||||
spawn(ShardingProducerController[TestConsumer.Job](producerId, sharding, None), s"shardingController-$idCount")
|
||||
|
||||
val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
|
||||
shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)
|
||||
|
||||
val replyProbe = createTestProbe[Done]()
|
||||
producerProbe.receiveMessage().askNextTo ! ShardingProducerController.MessageWithConfirmation(
|
||||
"entity-0",
|
||||
TestConsumer.Job("msg-1"),
|
||||
replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! ShardingProducerController.MessageWithConfirmation(
|
||||
"entity-0",
|
||||
TestConsumer.Job("msg-2"),
|
||||
replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! ShardingProducerController.MessageWithConfirmation(
|
||||
"entity-1",
|
||||
TestConsumer.Job("msg-3"),
|
||||
replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! ShardingProducerController.MessageWithConfirmation(
|
||||
"entity-0",
|
||||
TestConsumer.Job("msg-4"),
|
||||
replyProbe.ref)
|
||||
|
||||
consumerEndProbe.receiveMessage() // entity-0 received 3 messages
|
||||
consumerEndProbe.expectNoMessage()
|
||||
|
||||
producerProbe.receiveMessage().askNextTo ! ShardingProducerController.MessageWithConfirmation(
|
||||
"entity-1",
|
||||
TestConsumer.Job("msg-5"),
|
||||
replyProbe.ref)
|
||||
producerProbe.receiveMessage().askNextTo ! ShardingProducerController.MessageWithConfirmation(
|
||||
"entity-1",
|
||||
TestConsumer.Job("msg-6"),
|
||||
replyProbe.ref)
|
||||
      consumerEndProbe.receiveMessage() // entity-1 received 3 messages

      testKit.stop(shardingProducerController)
    }

    "include demand information in RequestNext" in {
      nextId()

      val shardingProbe =
        createTestProbe[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]]()
      val shardingProducerController =
        spawn(
          ShardingProducerController[TestConsumer.Job](producerId, shardingProbe.ref, None),
          s"shardingController-$idCount")
      val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
      shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)

      val next1 = producerProbe.receiveMessage()
      next1.entitiesWithDemand should ===(Set.empty)
      next1.bufferedForEntitiesWithoutDemand should ===(Map.empty)

      next1.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-1"))
      // for the first message no RequestNext until initial roundtrip
      producerProbe.expectNoMessage()

      val seq1 = shardingProbe.receiveMessage().message
      seq1.message should ===(TestConsumer.Job("msg-1"))
      seq1.producerController ! ProducerControllerImpl.Request(confirmedSeqNr = 0L, requestUpToSeqNr = 5, true, false)

      val next2 = producerProbe.receiveMessage()
      next2.entitiesWithDemand should ===(Set("entity-1"))
      next2.bufferedForEntitiesWithoutDemand should ===(Map.empty)

      next2.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-2"))
      val next3 = producerProbe.receiveMessage()
      // could be sent immediately since had demand, and Request(requestUpToSeqNr-5)
      next3.entitiesWithDemand should ===(Set("entity-1"))
      next3.bufferedForEntitiesWithoutDemand should ===(Map.empty)

      next3.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-3"))
      val next4 = producerProbe.receiveMessage()
      next4.entitiesWithDemand should ===(Set("entity-1"))
      next4.bufferedForEntitiesWithoutDemand should ===(Map.empty)

      next4.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-4"))
      val next5 = producerProbe.receiveMessage()
      next5.entitiesWithDemand should ===(Set("entity-1"))
      next5.bufferedForEntitiesWithoutDemand should ===(Map.empty)

      next5.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-5"))
      // no more demand Request(requestUpToSeqNr-5)
      producerProbe.expectNoMessage()
      // but we can anyway send more, which will be buffered
      next5.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-6"))

      shardingProbe.receiveMessage()
      shardingProbe.receiveMessage()
      shardingProbe.receiveMessage()
      val seq5 = shardingProbe.receiveMessage().message
      seq5.message should ===(TestConsumer.Job("msg-5"))

      val next6 = producerProbe.receiveMessage()
      next6.entitiesWithDemand should ===(Set.empty)
      next6.bufferedForEntitiesWithoutDemand should ===(Map("entity-1" -> 1))

      // and we can send to another entity
      next6.sendNextTo ! ShardingEnvelope("entity-2", TestConsumer.Job("msg-7"))
      producerProbe.expectNoMessage()
      val seq7 = shardingProbe.receiveMessage().message
      seq7.message should ===(TestConsumer.Job("msg-7"))
      seq7.producerController ! ProducerControllerImpl.Request(confirmedSeqNr = 0L, requestUpToSeqNr = 5, true, false)

      val next8 = producerProbe.receiveMessage()
      next8.entitiesWithDemand should ===(Set("entity-2"))
      next8.bufferedForEntitiesWithoutDemand should ===(Map("entity-1" -> 1))

      // when new demand the buffered messages will be sent
      seq5.producerController ! ProducerControllerImpl.Request(confirmedSeqNr = 5L, requestUpToSeqNr = 10, true, false)
      val seq6 = shardingProbe.receiveMessage().message
      seq6.message should ===(TestConsumer.Job("msg-6"))

      val next9 = producerProbe.receiveMessage()
      next9.entitiesWithDemand should ===(Set("entity-1", "entity-2"))
      next9.bufferedForEntitiesWithoutDemand should ===(Map.empty)

      testKit.stop(shardingProducerController)
    }

    "allow restart of producer" in {
      nextId()

      val shardingProbe =
        createTestProbe[ShardingEnvelope[SequencedMessage[TestConsumer.Job]]]()
      val shardingProducerController =
        spawn(
          ShardingProducerController[TestConsumer.Job](producerId, shardingProbe.ref, None),
          s"shardingController-$idCount")
      val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
      shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-1"))
      val seq1 = shardingProbe.receiveMessage().message
      seq1.message should ===(TestConsumer.Job("msg-1"))
      seq1.producerController ! ProducerControllerImpl.Request(confirmedSeqNr = 0L, requestUpToSeqNr = 5, true, false)

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-2"))
      shardingProbe.receiveMessage().message.message should ===(TestConsumer.Job("msg-2"))

      // restart producer, new Start
      val producerProbe2 = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
      shardingProducerController ! ShardingProducerController.Start(producerProbe2.ref)

      producerProbe2.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-3"))
      shardingProbe.receiveMessage().message.message should ===(TestConsumer.Job("msg-3"))

      testKit.stop(shardingProducerController)
    }

    "deliver unconfirmed if ShardingConsumerController is terminated" in {
      // for example if ShardingConsumerController is rebalanced, but no more messages are sent to the entity
      nextId()

      val consumerIncarnation = new AtomicInteger(0)
      val consumerProbes = Vector.fill(3)(createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]())

      val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
      val region = ClusterSharding(system).init(Entity(typeKey)(_ =>
        ShardingConsumerController[TestConsumer.Job, TestConsumer.Command] { cc =>
          cc ! ConsumerController.Start(consumerProbes(consumerIncarnation.getAndIncrement()).ref)
          Behaviors.empty
        }))

      val shardingProducerSettings =
        ShardingProducerController.Settings(system).withResendFirsUnconfirmedIdleTimeout(1500.millis)
      val shardingProducerController =
        spawn(
          ShardingProducerController[TestConsumer.Job](producerId, region, None, shardingProducerSettings),
          s"shardingController-$idCount")
      val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
      shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-1"))
      val delivery1 = consumerProbes(0).receiveMessage()
      delivery1.message should ===(TestConsumer.Job("msg-1"))
      delivery1.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-2"))
      val delivery2 = consumerProbes(0).receiveMessage()
      delivery2.message should ===(TestConsumer.Job("msg-2"))
      delivery2.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-3"))
      val delivery3 = consumerProbes(0).receiveMessage()
      delivery3.message should ===(TestConsumer.Job("msg-3"))
      // msg-3 not Confirmed

      consumerProbes(0).stop()
      Thread.sleep(1000) // let it terminate

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-4"))
      val delivery3b = consumerProbes(1).receiveMessage()
      // msg-3 is redelivered
      delivery3b.message should ===(TestConsumer.Job("msg-3"))
      delivery3b.confirmTo ! ConsumerController.Confirmed
      val delivery4 = consumerProbes(1).receiveMessage()
      delivery4.message should ===(TestConsumer.Job("msg-4"))

      // redeliver also when no more messages are sent
      consumerProbes(1).stop()

      val delivery4b = consumerProbes(2).receiveMessage()
      delivery4b.message should ===(TestConsumer.Job("msg-4"))

      consumerProbes(2).stop()
      testKit.stop(shardingProducerController)
    }

    "cleanup unused ProducerController" in {
      nextId()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()

      val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
      val region = ClusterSharding(system).init(Entity(typeKey)(_ =>
        ShardingConsumerController[TestConsumer.Job, TestConsumer.Command] { cc =>
          cc ! ConsumerController.Start(consumerProbe.ref)
          Behaviors.empty
        }))

      val shardingProducerSettings =
        ShardingProducerController.Settings(system).withCleanupUnusedAfter(1.second)
      val shardingProducerController =
        spawn(
          ShardingProducerController[TestConsumer.Job](producerId, region, None, shardingProducerSettings),
          s"shardingController-$idCount")
      val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
      shardingProducerController ! ShardingProducerController.Start(producerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-1"))
      val delivery1 = consumerProbe.receiveMessage()
      delivery1.message should ===(TestConsumer.Job("msg-1"))
      delivery1.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-2"))
      val delivery2 = consumerProbe.receiveMessage()
      delivery2.message should ===(TestConsumer.Job("msg-2"))
      delivery2.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-2", TestConsumer.Job("msg-3"))
      val delivery3 = consumerProbe.receiveMessage()
      delivery3.message should ===(TestConsumer.Job("msg-3"))
      // msg-3 not Confirmed

      val next4 = producerProbe.receiveMessage()
      next4.entitiesWithDemand should ===(Set("entity-1", "entity-2"))

      Thread.sleep(2000)

      next4.sendNextTo ! ShardingEnvelope("entity-2", TestConsumer.Job("msg-4"))
      val next5 = producerProbe.receiveMessage()
      next5.entitiesWithDemand should ===(Set("entity-2")) // entity-1 removed

      delivery3.confirmTo ! ConsumerController.Confirmed
      val delivery4 = consumerProbe.receiveMessage()
      delivery4.message should ===(TestConsumer.Job("msg-4"))
      delivery4.confirmTo ! ConsumerController.Confirmed

      // send to entity-1 again
      next5.sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-5"))
      val delivery5 = consumerProbe.receiveMessage()
      delivery5.message should ===(TestConsumer.Job("msg-5"))
      delivery5.confirmTo ! ConsumerController.Confirmed

      consumerProbe.stop()
      testKit.stop(shardingProducerController)
    }

    "cleanup ConsumerController when ProducerController is terminated" in {
      nextId()

      val consumerProbe = createTestProbe[ConsumerController.Delivery[TestConsumer.Job]]()

      val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")
      val region = ClusterSharding(system).init(Entity(typeKey)(_ =>
        ShardingConsumerController[TestConsumer.Job, TestConsumer.Command] { cc =>
          cc ! ConsumerController.Start(consumerProbe.ref)
          Behaviors.empty
        }))

      val shardingProducerController1 =
        spawn(ShardingProducerController[TestConsumer.Job](producerId, region, None), s"shardingController-$idCount")
      val producerProbe = createTestProbe[ShardingProducerController.RequestNext[TestConsumer.Job]]()
      shardingProducerController1 ! ShardingProducerController.Start(producerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-1"))
      val delivery1 = consumerProbe.receiveMessage()
      delivery1.message should ===(TestConsumer.Job("msg-1"))
      delivery1.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-2"))
      val delivery2 = consumerProbe.receiveMessage()
      delivery2.message should ===(TestConsumer.Job("msg-2"))
      delivery2.confirmTo ! ConsumerController.Confirmed
      producerProbe.receiveMessage()

      LoggingTestKit.empty
        .withMessageRegex("ProducerController.*terminated")
        .withLoggerName("akka.cluster.sharding.typed.delivery.ShardingConsumerController")
        .expect {
          testKit.stop(shardingProducerController1)
        }

      val shardingProducerController2 =
        spawn(ShardingProducerController[TestConsumer.Job](producerId, region, None), s"shardingController-$idCount")
      shardingProducerController2 ! ShardingProducerController.Start(producerProbe.ref)

      LoggingTestKit
        .debug("Starting ConsumerController")
        .withLoggerName("akka.cluster.sharding.typed.delivery.ShardingConsumerController")
        .expect {
          producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-3"))
        }
      val delivery3 = consumerProbe.receiveMessage()
      delivery3.message should ===(TestConsumer.Job("msg-3"))
      delivery3.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! ShardingEnvelope("entity-1", TestConsumer.Job("msg-4"))
      val delivery4 = consumerProbe.receiveMessage()
      delivery4.message should ===(TestConsumer.Job("msg-4"))
      delivery4.confirmTo ! ConsumerController.Confirmed

      consumerProbe.stop()
      testKit.stop(shardingProducerController2)
    }

  }

}

// TODO #28723 add a random test for sharding

@ -0,0 +1,108 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

package docs.delivery

import java.util.UUID

import com.github.ghik.silencer.silent

import akka.actor.typed.ActorSystem

//#imports
import akka.actor.typed.ActorRef
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.scaladsl.Behaviors

//#imports

@silent("never used")
object PointToPointDocExample {

  //#producer
  object FibonacciProducer {
    sealed trait Command

    private case class WrappedRequestNext(r: ProducerController.RequestNext[FibonacciConsumer.Command]) extends Command

    def apply(
        producerController: ActorRef[ProducerController.Command[FibonacciConsumer.Command]]): Behavior[Command] = {
      Behaviors.setup { context =>
        val requestNextAdapter =
          context.messageAdapter[ProducerController.RequestNext[FibonacciConsumer.Command]](WrappedRequestNext(_))
        producerController ! ProducerController.Start(requestNextAdapter)

        fibonacci(0, 1, 0)
      }
    }

    private def fibonacci(n: Long, b: BigInt, a: BigInt): Behavior[Command] = {
      Behaviors.receive {
        case (context, WrappedRequestNext(next)) =>
          context.log.info("Generated fibonacci {}: {}", n, a)
          next.sendNextTo ! FibonacciConsumer.FibonacciNumber(n, a)

          if (n == 1000)
            Behaviors.stopped
          else
            fibonacci(n + 1, a + b, b)
      }
    }
  }
  //#producer

  //#consumer
  import akka.actor.typed.delivery.ConsumerController

  object FibonacciConsumer {
    sealed trait Command

    final case class FibonacciNumber(n: Long, value: BigInt) extends Command

    private case class WrappedDelivery(d: ConsumerController.Delivery[Command]) extends Command

    def apply(
        consumerController: ActorRef[ConsumerController.Command[FibonacciConsumer.Command]]): Behavior[Command] = {
      Behaviors.setup { context =>
        val deliveryAdapter =
          context.messageAdapter[ConsumerController.Delivery[FibonacciConsumer.Command]](WrappedDelivery(_))
        consumerController ! ConsumerController.Start(deliveryAdapter)

        Behaviors.receiveMessagePartial {
          case WrappedDelivery(ConsumerController.Delivery(FibonacciNumber(n, value), confirmTo)) =>
            context.log.info("Processed fibonacci {}: {}", n, value)
            confirmTo ! ConsumerController.Confirmed
            Behaviors.same
        }
      }
    }
  }
  //#consumer

  object Guardian {
    def apply(): Behavior[Nothing] = {
      Behaviors.setup[Nothing] { context =>
        //#connect
        val consumerController = context.spawn(ConsumerController[FibonacciConsumer.Command](), "consumerController")
        context.spawn(FibonacciConsumer(consumerController), "consumer")

        val producerId = s"fibonacci-${UUID.randomUUID()}"
        val producerController = context.spawn(
          ProducerController[FibonacciConsumer.Command](producerId, durableQueueBehavior = None),
          "producerController")
        context.spawn(FibonacciProducer(producerController), "producer")

        consumerController ! ConsumerController.RegisterToProducerController(producerController)
        //#connect

        Behaviors.empty
      }
    }
  }

  def main(args: Array[String]): Unit = {
    ActorSystem[Nothing](Guardian(), "FibonacciExample")
  }
}

@ -0,0 +1,217 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

package docs.delivery

//#imports
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success

import akka.Done
import akka.actor.typed.ActorRef
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.scaladsl.ActorContext
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.typed.delivery.ShardingConsumerController
import akka.util.Timeout

//#imports

object ShardingDocExample {

  //#consumer
  trait DB {
    def save(id: String, value: TodoList.State): Future[Done]
    def load(id: String): Future[TodoList.State]
  }

  object TodoList {

    sealed trait Command

    final case class AddTask(item: String) extends Command
    final case class CompleteTask(item: String) extends Command

    private final case class InitialState(state: State) extends Command
    private final case class SaveSuccess(confirmTo: ActorRef[ConsumerController.Confirmed]) extends Command
    private final case class DBError(cause: Throwable) extends Command

    private final case class CommandDelivery(command: Command, confirmTo: ActorRef[ConsumerController.Confirmed])
        extends Command

    final case class State(tasks: Vector[String])

    def apply(
        id: String,
        db: DB,
        consumerController: ActorRef[ConsumerController.Start[Command]]): Behavior[Command] = {
      Behaviors.setup[Command] { context =>
        new TodoList(context, id, db).start(consumerController)
      }
    }

  }

  class TodoList(context: ActorContext[TodoList.Command], id: String, db: DB) {
    import TodoList._

    private def start(consumerController: ActorRef[ConsumerController.Start[Command]]): Behavior[Command] = {
      context.pipeToSelf(db.load(id)) {
        case Success(value) => InitialState(value)
        case Failure(cause) => DBError(cause)
      }

      Behaviors.receiveMessagePartial {
        case InitialState(state) =>
          val deliveryAdapter: ActorRef[ConsumerController.Delivery[Command]] = context.messageAdapter { delivery =>
            CommandDelivery(delivery.message, delivery.confirmTo)
          }
          consumerController ! ConsumerController.Start(deliveryAdapter)
          active(state)
        case DBError(cause) =>
          throw cause
      }
    }

    private def active(state: State): Behavior[Command] = {
      Behaviors.receiveMessagePartial {
        case CommandDelivery(AddTask(item), confirmTo) =>
          val newState = state.copy(tasks = state.tasks :+ item)
          save(newState, confirmTo)
          active(newState)
        case CommandDelivery(CompleteTask(item), confirmTo) =>
          val newState = state.copy(tasks = state.tasks.filterNot(_ == item))
          save(newState, confirmTo)
          active(newState)
        case SaveSuccess(confirmTo) =>
          confirmTo ! ConsumerController.Confirmed
          Behaviors.same
        case DBError(cause) =>
          throw cause
      }
    }

    private def save(newState: State, confirmTo: ActorRef[ConsumerController.Confirmed]): Unit = {
      context.pipeToSelf(db.save(id, newState)) {
        case Success(_)     => SaveSuccess(confirmTo)
        case Failure(cause) => DBError(cause)
      }
    }
  }
  //#consumer

  //#producer
  import akka.cluster.sharding.typed.delivery.ShardingProducerController

  object TodoService {
    sealed trait Command

    final case class UpdateTodo(listId: String, item: String, completed: Boolean, replyTo: ActorRef[Response])
        extends Command

    sealed trait Response
    case object Accepted extends Response
    case object Rejected extends Response
    case object MaybeAccepted extends Response

    private final case class WrappedRequestNext(requestNext: ShardingProducerController.RequestNext[TodoList.Command])
        extends Command
    private final case class Confirmed(originalReplyTo: ActorRef[Response]) extends Command
    private final case class TimedOut(originalReplyTo: ActorRef[Response]) extends Command

    def apply(producerController: ActorRef[ShardingProducerController.Command[TodoList.Command]]): Behavior[Command] = {
      Behaviors.setup { context =>
        new TodoService(context).start(producerController)
      }
    }

  }

  class TodoService(context: ActorContext[TodoService.Command]) {
    import TodoService._

    private implicit val askTimeout: Timeout = 5.seconds

    private def start(
        producerController: ActorRef[ShardingProducerController.Start[TodoList.Command]]): Behavior[Command] = {
      val requestNextAdapter: ActorRef[ShardingProducerController.RequestNext[TodoList.Command]] =
        context.messageAdapter(WrappedRequestNext.apply)
      producerController ! ShardingProducerController.Start(requestNextAdapter)

      Behaviors.receiveMessagePartial {
        case WrappedRequestNext(next) =>
          active(next)
        case UpdateTodo(_, _, _, replyTo) =>
          // not hooked up with shardingProducerController yet
          replyTo ! Rejected
          Behaviors.same
      }
    }

    private def active(requestNext: ShardingProducerController.RequestNext[TodoList.Command]): Behavior[Command] = {
      Behaviors.receiveMessage {
        case WrappedRequestNext(next) =>
          active(next)

        case UpdateTodo(listId, item, completed, replyTo) =>
          if (requestNext.bufferedForEntitiesWithoutDemand.getOrElse(listId, 0) >= 100)
            replyTo ! Rejected
          else {
            val requestMsg = if (completed) TodoList.CompleteTask(item) else TodoList.AddTask(item)
            context.ask[ShardingProducerController.MessageWithConfirmation[TodoList.Command], Done](
              requestNext.askNextTo,
              askReplyTo => ShardingProducerController.MessageWithConfirmation(listId, requestMsg, askReplyTo)) {
              case Success(Done) => Confirmed(replyTo)
              case Failure(_)    => TimedOut(replyTo)
            }
          }
          Behaviors.same

        case Confirmed(originalReplyTo) =>
          originalReplyTo ! Accepted
          Behaviors.same

        case TimedOut(originalReplyTo) =>
          originalReplyTo ! MaybeAccepted
          Behaviors.same
      }
    }

  }
  //#producer

  def illustrateInit(): Unit = {
    Behaviors.setup[Nothing] { context =>
      //#init
      import akka.cluster.sharding.typed.scaladsl.ClusterSharding
      import akka.cluster.sharding.typed.scaladsl.Entity
      import akka.cluster.sharding.typed.scaladsl.EntityTypeKey
      import akka.cluster.typed.Cluster

      val db: DB = ???

      val system = context.system

      val TypeKey = EntityTypeKey[ConsumerController.SequencedMessage[TodoList.Command]]("todo")

      val region = ClusterSharding(system).init(Entity(TypeKey)(entityContext =>
        ShardingConsumerController(start => TodoList(entityContext.entityId, db, start))))

      val selfAddress = Cluster(system).selfMember.address
      val producerId = s"todo-producer-${selfAddress.host}:${selfAddress.port}"

      val producerController =
        context.spawn(ShardingProducerController(producerId, region, durableQueueBehavior = None), "producerController")

      context.spawn(TodoService(producerController), "producer")
      //#init

      Behaviors.empty
    }
  }

}

@ -0,0 +1,233 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

package docs.delivery

import java.util.UUID

import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success

import akka.Done
import akka.actor.typed.ActorRef
import com.github.ghik.silencer.silent

@silent("never used")
object WorkPullingDocExample {

  //#imports
  import akka.actor.typed.scaladsl.Behaviors
  import akka.actor.typed.Behavior
  //#imports

  //#consumer
  import akka.actor.typed.delivery.ConsumerController
  import akka.actor.typed.receptionist.ServiceKey

  object ImageConverter {
    sealed trait Command
    final case class ConversionJob(resultId: UUID, fromFormat: String, toFormat: String, image: Array[Byte])
    private case class WrappedDelivery(d: ConsumerController.Delivery[ConversionJob]) extends Command

    val serviceKey = ServiceKey[ConsumerController.Command[ConversionJob]]("ImageConverter")

    def apply(): Behavior[Command] = {
      Behaviors.setup { context =>
        val deliveryAdapter =
          context.messageAdapter[ConsumerController.Delivery[ConversionJob]](WrappedDelivery(_))
        val consumerController =
          context.spawn(ConsumerController(serviceKey), "consumerController")
        consumerController ! ConsumerController.Start(deliveryAdapter)

        Behaviors.receiveMessage {
          case WrappedDelivery(delivery) =>
            val image = delivery.message.image
            val fromFormat = delivery.message.fromFormat
            val toFormat = delivery.message.toFormat
            // convert image...
            // store result with resultId key for later retrieval

            // and when completed confirm
            delivery.confirmTo ! ConsumerController.Confirmed

            Behaviors.same
        }

      }
    }

  }
  //#consumer

  //#producer
  import akka.actor.typed.delivery.WorkPullingProducerController
  import akka.actor.typed.scaladsl.ActorContext
  import akka.actor.typed.scaladsl.StashBuffer

  object ImageWorkManager {
    trait Command
    final case class Convert(fromFormat: String, toFormat: String, image: Array[Byte]) extends Command
    private case class WrappedRequestNext(r: WorkPullingProducerController.RequestNext[ImageConverter.ConversionJob])
        extends Command

    final case class GetResult(resultId: UUID, replyTo: ActorRef[Option[Array[Byte]]]) extends Command

    //#producer

    //#ask
    final case class ConvertRequest(
        fromFormat: String,
        toFormat: String,
        image: Array[Byte],
        replyTo: ActorRef[ConvertResponse])
        extends Command

    sealed trait ConvertResponse
    final case class ConvertAccepted(resultId: UUID) extends ConvertResponse
    case object ConvertRejected extends ConvertResponse
    final case class ConvertTimedOut(resultId: UUID) extends ConvertResponse

    private final case class AskReply(resultId: UUID, originalReplyTo: ActorRef[ConvertResponse], timeout: Boolean)
        extends Command
    //#ask

    //#producer
    def apply(): Behavior[Command] = {
      Behaviors.setup { context =>
        val requestNextAdapter =
          context.messageAdapter[WorkPullingProducerController.RequestNext[ImageConverter.ConversionJob]](
            WrappedRequestNext(_))
        val producerController = context.spawn(
          WorkPullingProducerController(
            producerId = "workManager",
            workerServiceKey = ImageConverter.serviceKey,
            durableQueueBehavior = None),
          "producerController")
        //#producer
        //#durable-queue
        import akka.persistence.typed.delivery.EventSourcedProducerQueue
        import akka.persistence.typed.PersistenceId

        val durableQueue =
          EventSourcedProducerQueue[ImageConverter.ConversionJob](PersistenceId.ofUniqueId("ImageWorkManager"))
        val durableProducerController = context.spawn(
          WorkPullingProducerController(
            producerId = "workManager",
            workerServiceKey = ImageConverter.serviceKey,
            durableQueueBehavior = Some(durableQueue)),
          "producerController")
        //#durable-queue
        //#producer
        producerController ! WorkPullingProducerController.Start(requestNextAdapter)

        Behaviors.withStash(1000) { stashBuffer =>
          new ImageWorkManager(context, stashBuffer).waitForNext()
        }
      }
    }

  }

  final class ImageWorkManager(
      context: ActorContext[ImageWorkManager.Command],
      stashBuffer: StashBuffer[ImageWorkManager.Command]) {

    import ImageWorkManager._

    private def waitForNext(): Behavior[Command] = {
      Behaviors.receiveMessage {
        case WrappedRequestNext(next) =>
          stashBuffer.unstashAll(active(next))
        case c: Convert =>
          if (stashBuffer.isFull) {
            context.log.warn("Too many Convert requests.")
            Behaviors.same
          } else {
            stashBuffer.stash(c)
            Behaviors.same
          }
        case GetResult(resultId, replyTo) =>
          // TODO retrieve the stored result and reply
          Behaviors.same
      }
    }

    private def active(
        next: WorkPullingProducerController.RequestNext[ImageConverter.ConversionJob]): Behavior[Command] = {
      Behaviors.receiveMessage {
        case Convert(from, to, image) =>
          val resultId = UUID.randomUUID()
          next.sendNextTo ! ImageConverter.ConversionJob(resultId, from, to, image)
          waitForNext()
        case GetResult(resultId, replyTo) =>
          // TODO retrieve the stored result and reply
          Behaviors.same
        case _: WrappedRequestNext =>
          throw new IllegalStateException("Unexpected RequestNext")
      }
    }
    //#producer
    object askScope {
      //#ask

      import WorkPullingProducerController.MessageWithConfirmation
      import akka.util.Timeout

      implicit val askTimeout: Timeout = 5.seconds

      private def waitForNext(): Behavior[Command] = {
        Behaviors.receiveMessage {
          case WrappedRequestNext(next) =>
            stashBuffer.unstashAll(active(next))
          case c: ConvertRequest =>
            if (stashBuffer.isFull) {
              c.replyTo ! ConvertRejected
              Behaviors.same
            } else {
              stashBuffer.stash(c)
              Behaviors.same
            }
          case AskReply(resultId, originalReplyTo, timeout) =>
            val response = if (timeout) ConvertTimedOut(resultId) else ConvertAccepted(resultId)
            originalReplyTo ! response
            Behaviors.same
          case GetResult(resultId, replyTo) =>
            // TODO retrieve the stored result and reply
            Behaviors.same
        }
      }

      private def active(
          next: WorkPullingProducerController.RequestNext[ImageConverter.ConversionJob]): Behavior[Command] = {
        Behaviors.receiveMessage {
          case ConvertRequest(from, to, image, originalReplyTo) =>
            val resultId = UUID.randomUUID()
            context.ask[MessageWithConfirmation[ImageConverter.ConversionJob], Done](
              next.askNextTo,
              askReplyTo =>
                MessageWithConfirmation(ImageConverter.ConversionJob(resultId, from, to, image), askReplyTo)) {
              case Success(done) => AskReply(resultId, originalReplyTo, timeout = false)
              case Failure(_)    => AskReply(resultId, originalReplyTo, timeout = true)
            }
            waitForNext()
          case AskReply(resultId, originalReplyTo, timeout) =>
            val response = if (timeout) ConvertTimedOut(resultId) else ConvertAccepted(resultId)
            originalReplyTo ! response
            Behaviors.same
          case GetResult(resultId, replyTo) =>
            // TODO retrieve the stored result and reply
            Behaviors.same
          case _: WrappedRequestNext =>
            throw new IllegalStateException("Unexpected RequestNext")
        }
      }

      //#ask
    }
    //#producer
  }
  //#producer

}

File diff suppressed because it is too large
74  akka-cluster-typed/src/main/protobuf/ReliableDelivery.proto  Normal file

@ -0,0 +1,74 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

syntax = "proto2";

package akka.cluster.typed.delivery;

option java_package = "akka.cluster.typed.internal.protobuf";
option optimize_for = SPEED;
import "ContainerFormats.proto";

// ConsumerController
message SequencedMessage {
  required string producerId = 1;
  required int64 seqNr = 2;
  required bool first = 3;
  required bool ack = 4;
  required string producerControllerRef = 5;
  required Payload message = 6;
}

// ProducerController
message RegisterConsumer {
  required string consumerControllerRef = 1;
}

// ProducerController
message Request {
  required int64 confirmedSeqNr = 1;
  required int64 requestUpToSeqNr = 2;
  required bool supportResend = 3;
  required bool viaTimeout = 4;
}

// ProducerController
message Resend {
  required int64 fromSeqNr = 1;
}

// ProducerController
message Ack {
  required int64 confirmedSeqNr = 1;
}

// DurableProducerQueue
message State {
  required int64 currentSeqNr = 1;
  required int64 highestConfirmedSeqNr = 2;
  repeated Confirmed confirmed = 3;
  repeated MessageSent unconfirmed = 4;
}

// DurableProducerQueue
message Confirmed {
  required int64 seqNr = 1;
  required string qualifier = 2;
  required int64 timestamp = 3;
}

// DurableProducerQueue
message MessageSent {
  required int64 seqNr = 1;
  required string qualifier = 2;
  required bool ack = 3;
  required int64 timestamp = 4;
  required Payload message = 5;
}

// DurableProducerQueue
message Cleanup {
  repeated string qualifiers = 1;
}

@ -43,13 +43,16 @@ akka {
  actor {
    serialization-identifiers {
      "akka.cluster.typed.internal.AkkaClusterTypedSerializer" = 28
      "akka.cluster.typed.internal.delivery.ReliableDeliverySerializer" = 36
    }
    serializers {
      typed-cluster = "akka.cluster.typed.internal.AkkaClusterTypedSerializer"
      reliable-delivery = "akka.cluster.typed.internal.delivery.ReliableDeliverySerializer"
    }
    serialization-bindings {
      "akka.cluster.typed.internal.receptionist.ClusterReceptionist$Entry" = typed-cluster
      "akka.actor.typed.internal.pubsub.TopicImpl$MessagePublished" = typed-cluster
      "akka.actor.typed.delivery.internal.DeliverySerializable" = reliable-delivery
    }
  }
  cluster.configuration-compatibility-check.checkers {

@ -0,0 +1,241 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.cluster.typed.internal.delivery

import java.io.NotSerializableException

import akka.util.ccompat.JavaConverters._
import akka.actor.typed.ActorRefResolver
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.delivery.DurableProducerQueue
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.delivery.internal.ProducerControllerImpl
import akka.actor.typed.scaladsl.adapter._
import akka.annotation.InternalApi
import akka.cluster.typed.internal.protobuf.ReliableDelivery
import akka.cluster.typed.internal.protobuf.ReliableDelivery.Confirmed
import akka.remote.serialization.WrappedPayloadSupport
import akka.serialization.BaseSerializer
import akka.serialization.SerializerWithStringManifest

/**
 * INTERNAL API
 */
@InternalApi private[akka] class ReliableDeliverySerializer(val system: akka.actor.ExtendedActorSystem)
    extends SerializerWithStringManifest
    with BaseSerializer {

  private val payloadSupport = new WrappedPayloadSupport(system)
  // lazy because Serializers are initialized early on. `toTyped` might then try to
  // initialize the classic ActorSystemAdapter extension.
  private lazy val resolver = ActorRefResolver(system.toTyped)

  private val SequencedMessageManifest = "a"
  private val AckManifest = "b"
  private val RequestManifest = "c"
  private val ResendManifest = "d"
  private val RegisterConsumerManifest = "e"

  private val DurableQueueMessageSentManifest = "f"
  private val DurableQueueConfirmedManifest = "g"
  private val DurableQueueStateManifest = "h"
  private val DurableQueueCleanupManifest = "i"

  override def manifest(o: AnyRef): String = o match {
    case _: ConsumerController.SequencedMessage[_] => SequencedMessageManifest
    case _: ProducerControllerImpl.Ack             => AckManifest
    case _: ProducerControllerImpl.Request         => RequestManifest
    case _: ProducerControllerImpl.Resend          => ResendManifest
    case _: ProducerController.RegisterConsumer[_] => RegisterConsumerManifest
    case _: DurableProducerQueue.MessageSent[_]    => DurableQueueMessageSentManifest
    case _: DurableProducerQueue.Confirmed         => DurableQueueConfirmedManifest
    case _: DurableProducerQueue.State[_]          => DurableQueueStateManifest
    case _: DurableProducerQueue.Cleanup           => DurableQueueCleanupManifest
    case _ =>
      throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass} in [${getClass.getName}]")
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    case m: ConsumerController.SequencedMessage[_] => sequencedMessageToBinary(m)
    case m: ProducerControllerImpl.Ack             => ackToBinary(m)
    case m: ProducerControllerImpl.Request         => requestToBinary(m)
    case m: ProducerControllerImpl.Resend          => resendToBinary(m)
    case m: ProducerController.RegisterConsumer[_] => registerConsumerToBinary(m)
    case m: DurableProducerQueue.MessageSent[_]    => durableQueueMessageSentToBinary(m)
    case m: DurableProducerQueue.Confirmed         => durableQueueConfirmedToBinary(m)
    case m: DurableProducerQueue.State[_]          => durableQueueStateToBinary(m)
    case m: DurableProducerQueue.Cleanup           => durableQueueCleanupToBinary(m)
    case _ =>
      throw new IllegalArgumentException(s"Cannot serialize object of type [${o.getClass.getName}]")
  }

  private def sequencedMessageToBinary(m: ConsumerController.SequencedMessage[_]): Array[Byte] = {
    val b = ReliableDelivery.SequencedMessage.newBuilder()
    b.setProducerId(m.producerId)
    b.setSeqNr(m.seqNr)
    b.setFirst(m.first)
    b.setAck(m.ack)
    b.setProducerControllerRef(resolver.toSerializationFormat(m.producerController))
    b.setMessage(payloadSupport.payloadBuilder(m.message))
    b.build().toByteArray()
  }

  private def ackToBinary(m: ProducerControllerImpl.Ack): Array[Byte] = {
    val b = ReliableDelivery.Ack.newBuilder()
    b.setConfirmedSeqNr(m.confirmedSeqNr)
    b.build().toByteArray()
  }

  private def requestToBinary(m: ProducerControllerImpl.Request): Array[Byte] = {
    val b = ReliableDelivery.Request.newBuilder()
    b.setConfirmedSeqNr(m.confirmedSeqNr)
    b.setRequestUpToSeqNr(m.requestUpToSeqNr)
    b.setSupportResend(m.supportResend)
    b.setViaTimeout(m.viaTimeout)
    b.build().toByteArray()
  }

  private def resendToBinary(m: ProducerControllerImpl.Resend): Array[Byte] = {
    val b = ReliableDelivery.Resend.newBuilder()
    b.setFromSeqNr(m.fromSeqNr)
    b.build().toByteArray()
  }

  private def registerConsumerToBinary(m: ProducerController.RegisterConsumer[_]): Array[Byte] = {
    val b = ReliableDelivery.RegisterConsumer.newBuilder()
    b.setConsumerControllerRef(resolver.toSerializationFormat(m.consumerController))
    b.build().toByteArray()
  }

  private def durableQueueMessageSentToBinary(m: DurableProducerQueue.MessageSent[_]): Array[Byte] = {
    durableQueueMessageSentToProto(m).toByteArray()
  }

  private def durableQueueMessageSentToProto(m: DurableProducerQueue.MessageSent[_]): ReliableDelivery.MessageSent = {
    val b = ReliableDelivery.MessageSent.newBuilder()
    b.setSeqNr(m.seqNr)
    b.setQualifier(m.confirmationQualifier)
    b.setAck(m.ack)
    b.setTimestamp(m.timestampMillis)
    b.setMessage(payloadSupport.payloadBuilder(m.message))
    b.build()
  }

  private def durableQueueConfirmedToBinary(m: DurableProducerQueue.Confirmed): Array[Byte] = {
    durableQueueConfirmedToProto(m.confirmationQualifier, m.seqNr, m.timestampMillis).toByteArray()
  }

  private def durableQueueConfirmedToProto(
      qualifier: String,
      seqNr: DurableProducerQueue.SeqNr,
      timestampMillis: DurableProducerQueue.TimestampMillis): Confirmed = {
    val b = ReliableDelivery.Confirmed.newBuilder()
    b.setSeqNr(seqNr)
    b.setQualifier(qualifier)
    b.setTimestamp(timestampMillis)
    b.build()
  }

  private def durableQueueStateToBinary(m: DurableProducerQueue.State[_]): Array[Byte] = {
    val b = ReliableDelivery.State.newBuilder()
    b.setCurrentSeqNr(m.currentSeqNr)
    b.setHighestConfirmedSeqNr(m.highestConfirmedSeqNr)
    b.addAllConfirmed(m.confirmedSeqNr.map {
      case (qualifier, (seqNr, timestamp)) => durableQueueConfirmedToProto(qualifier, seqNr, timestamp)
    }.asJava)
    b.addAllUnconfirmed(m.unconfirmed.map(durableQueueMessageSentToProto).asJava)
    b.build().toByteArray()
  }

  private def durableQueueCleanupToBinary(m: DurableProducerQueue.Cleanup): Array[Byte] = {
    val b = ReliableDelivery.Cleanup.newBuilder()
    b.addAllQualifiers(m.confirmationQualifiers.asJava)
    b.build().toByteArray()
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    case SequencedMessageManifest        => sequencedMessageFromBinary(bytes)
    case AckManifest                     => ackFromBinary(bytes)
    case RequestManifest                 => requestFromBinary(bytes)
    case ResendManifest                  => resendFromBinary(bytes)
    case RegisterConsumerManifest        => registerConsumerFromBinary(bytes)
    case DurableQueueMessageSentManifest => durableQueueMessageSentFromBinary(bytes)
    case DurableQueueConfirmedManifest   => durableQueueConfirmedFromBinary(bytes)
    case DurableQueueStateManifest       => durableQueueStateFromBinary(bytes)
    case DurableQueueCleanupManifest     => durableQueueCleanupFromBinary(bytes)
    case _ =>
      throw new NotSerializableException(
        s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]")
  }

  private def sequencedMessageFromBinary(bytes: Array[Byte]): AnyRef = {
    val seqMsg = ReliableDelivery.SequencedMessage.parseFrom(bytes)
    val wrappedMsg = payloadSupport.deserializePayload(seqMsg.getMessage)
    ConsumerController.SequencedMessage(
      seqMsg.getProducerId,
      seqMsg.getSeqNr,
      wrappedMsg,
      seqMsg.getFirst,
      seqMsg.getAck)(resolver.resolveActorRef(seqMsg.getProducerControllerRef))
  }

  private def ackFromBinary(bytes: Array[Byte]): AnyRef = {
    val ack = ReliableDelivery.Ack.parseFrom(bytes)
    ProducerControllerImpl.Ack(ack.getConfirmedSeqNr)
  }

  private def requestFromBinary(bytes: Array[Byte]): AnyRef = {
    val req = ReliableDelivery.Request.parseFrom(bytes)
    ProducerControllerImpl.Request(
      req.getConfirmedSeqNr,
      req.getRequestUpToSeqNr,
      req.getSupportResend,
      req.getViaTimeout)
  }

  private def resendFromBinary(bytes: Array[Byte]): AnyRef = {
    val resend = ReliableDelivery.Resend.parseFrom(bytes)
    ProducerControllerImpl.Resend(resend.getFromSeqNr)
  }

  private def registerConsumerFromBinary(bytes: Array[Byte]): AnyRef = {
    val reg = ReliableDelivery.RegisterConsumer.parseFrom(bytes)
    ProducerController.RegisterConsumer(
      resolver.resolveActorRef[ConsumerController.Command[Any]](reg.getConsumerControllerRef))
  }

  private def durableQueueMessageSentFromBinary(bytes: Array[Byte]): AnyRef = {
    val sent = ReliableDelivery.MessageSent.parseFrom(bytes)
    durableQueueMessageSentFromProto(sent)
  }

  private def durableQueueMessageSentFromProto(
      sent: ReliableDelivery.MessageSent): DurableProducerQueue.MessageSent[Any] = {
    val wrappedMsg = payloadSupport.deserializePayload(sent.getMessage)
    DurableProducerQueue.MessageSent(sent.getSeqNr, wrappedMsg, sent.getAck, sent.getQualifier, sent.getTimestamp)
  }

  private def durableQueueConfirmedFromBinary(bytes: Array[Byte]): AnyRef = {
    val confirmed = ReliableDelivery.Confirmed.parseFrom(bytes)
    DurableProducerQueue.Confirmed(confirmed.getSeqNr, confirmed.getQualifier, confirmed.getTimestamp)
  }

  private def durableQueueStateFromBinary(bytes: Array[Byte]): AnyRef = {
    val state = ReliableDelivery.State.parseFrom(bytes)
    DurableProducerQueue.State(
      state.getCurrentSeqNr,
      state.getHighestConfirmedSeqNr,
      state.getConfirmedList.asScala
        .map(confirmed => confirmed.getQualifier -> (confirmed.getSeqNr -> confirmed.getTimestamp))
        .toMap,
      state.getUnconfirmedList.asScala.toVector.map(durableQueueMessageSentFromProto))
  }

  private def durableQueueCleanupFromBinary(bytes: Array[Byte]): AnyRef = {
    val cleanup = ReliableDelivery.Cleanup.parseFrom(bytes)
    DurableProducerQueue.Cleanup(cleanup.getQualifiersList.iterator.asScala.toSet)
  }

}

@ -0,0 +1,72 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.cluster.typed.internal.delivery

import akka.actor.ExtendedActorSystem
import akka.actor.testkit.typed.scaladsl.LogCapturing
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.delivery.DurableProducerQueue
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.delivery.internal.ProducerControllerImpl
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter._
import akka.serialization.SerializationExtension
import org.scalatest.wordspec.AnyWordSpecLike

class ReliableDeliverySerializerSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing {

  private val classicSystem = system.toClassic
  private val serializer = new ReliableDeliverySerializer(classicSystem.asInstanceOf[ExtendedActorSystem])
  private val ref = spawn(Behaviors.empty[Any])

  "ReliableDeliverySerializer" must {

    val timestamp = System.currentTimeMillis()
    Seq(
      "SequencedMessage-1" -> ConsumerController.SequencedMessage("prod-1", 17L, "msg17", false, false)(ref),
      "SequencedMessage-2" -> ConsumerController.SequencedMessage("prod-1", 1L, "msg01", true, true)(ref),
      "Ack" -> ProducerControllerImpl.Ack(5L),
      "Request" -> ProducerControllerImpl.Request(5L, 25L, true, true),
      "Resend" -> ProducerControllerImpl.Resend(5L),
      "RegisterConsumer" -> ProducerController.RegisterConsumer(ref),
      "DurableProducerQueue.MessageSent-1" -> DurableProducerQueue.MessageSent(3L, "msg03", false, "", timestamp),
      "DurableProducerQueue.MessageSent-2" -> DurableProducerQueue.MessageSent(3L, "msg03", true, "q1", timestamp),
      "DurableProducerQueue.Confirmed" -> DurableProducerQueue.Confirmed(3L, "q2", timestamp),
      "DurableProducerQueue.State-1" -> DurableProducerQueue.State(3L, 2L, Map.empty, Vector.empty),
      "DurableProducerQueue.State-2" -> DurableProducerQueue.State(
        3L,
        2L,
        Map("" -> (2L -> timestamp)),
        Vector(DurableProducerQueue.MessageSent(3L, "msg03", false, "", timestamp))),
      "DurableProducerQueue.State-3" -> DurableProducerQueue.State(
        17L,
        12L,
        Map(
          "q1" -> (5L -> timestamp),
          "q2" -> (7L -> timestamp),
          "q3" -> (12L -> timestamp),
          "q4" -> (14L -> timestamp)),
        Vector(
          DurableProducerQueue.MessageSent(15L, "msg15", true, "q4", timestamp),
          DurableProducerQueue.MessageSent(16L, "msg16", true, "q4", timestamp))),
      "DurableProducerQueue.Cleanup" -> DurableProducerQueue.Cleanup(Set("q1", "q2", "q3"))).foreach {
      case (scenario, item) =>
        s"resolve serializer for $scenario" in {
          val serializer = SerializationExtension(classicSystem)
          serializer.serializerFor(item.getClass).getClass should be(classOf[ReliableDeliverySerializer])
        }

        s"serialize and de-serialize $scenario" in {
          verifySerialization(item)
        }
    }
  }

  def verifySerialization(msg: AnyRef): Unit = {
    serializer.fromBinary(serializer.toBinary(msg), serializer.manifest(msg)) should be(msg)
  }

}

@@ -29,6 +29,7 @@ that the module or API wasn't useful.
These are the current complete modules marked as **may change**:

* @ref:[Multi Node Testing](../multi-node-testing.md)
* @ref:[Reliable Delivery](../typed/reliable-delivery.md)
* @ref:[Sharded Daemon Process](../typed/cluster-sharded-daemon-process.md)


@@ -281,8 +281,7 @@ The third becomes necessary by virtue of the acknowledgements not being guarante
to arrive either.

An ACK-RETRY protocol with business-level acknowledgements and de-duplication using identifiers is
supported by the @ref:[At-Least-Once Delivery](../persistence.md#at-least-once-delivery) of the Classic Akka Persistence module.
Corresponding functionality for typed has not yet been implemented (see [issue #20984](https://github.com/akka/akka/issues/20984)).
supported by the @ref:[Reliable Delivery](../typed/reliable-delivery.md) feature.

Another way of implementing the third part would be to make processing the messages
idempotent on the level of the business logic.


@@ -41,6 +41,13 @@ so that one Cluster can span multiple data centers and still be tolerant to netw

<!--- #cluster-multidc --->

<!--- #reliable-delivery --->
### Reliable Delivery

Reliable delivery and flow control of messages between actors in the Cluster.

<!--- #reliable-delivery --->

<!--- #sharding-persistence-mode-deprecated --->
@@@ warning

@@ -115,11 +115,10 @@ actor the order of the messages is preserved. As long as the buffer limit is not
messages are delivered on a best effort basis, with at-most once delivery semantics,
in the same way as ordinary message sending.

#### AtLeastOnceDelivery
### Reliable delivery

Reliable end-to-end messaging, with at-least-once semantics can be added by using
`AtLeastOnceDelivery` with @ref:[Classic Persistence](../persistence.md#at-least-once-delivery),
and see @github[#20984](#20984) AtLeastOnceDelivery, including redelivery with a backoff.
Reliable end-to-end messaging, with at-least-once semantics can be added by using the
@ref:[Reliable Delivery](reliable-delivery.md#sharding) feature.

### Overhead


@@ -444,6 +444,9 @@ See @ref:[Distributed Data](distributed-data.md).
@@include[cluster.md](../includes/cluster.md) { #cluster-multidc }
See @ref:[Cluster Multi-DC](cluster-dc.md).

@@include[cluster.md](../includes/cluster.md) { #reliable-delivery }
See @ref:[Reliable Delivery](reliable-delivery.md)

## Example project

@java[@extref[Cluster example project](samples:akka-samples-cluster-java)]
BIN  akka-docs/src/main/paradox/typed/images/delivery-p2p-1.png  (new binary file, 80 KiB, not shown)
BIN  akka-docs/src/main/paradox/typed/images/delivery-sharding-1.png  (new binary file, 124 KiB, not shown)
BIN  akka-docs/src/main/paradox/typed/images/delivery-sharding-2.png  (new binary file, 155 KiB, not shown)
BIN  akka-docs/src/main/paradox/typed/images/delivery-sharding-3.png  (new binary file, 183 KiB, not shown)
BIN  (two additional new binary image files, 133 KiB and 167 KiB, names not shown)
@@ -18,6 +18,7 @@ project.description: Akka Cluster concepts, node membership service, CRDT Distri
* [sharded-daemon-process](cluster-sharded-daemon-process.md)
* [cluster-dc](cluster-dc.md)
* [distributed-pub-sub](distributed-pub-sub.md)
* [reliable-delivery](reliable-delivery.md)
* [serialization](../serialization.md)
* [serialization-jackson](../serialization-jackson.md)
* [multi-jvm-testing](../multi-jvm-testing.md)

413  akka-docs/src/main/paradox/typed/reliable-delivery.md  (new file)
@@ -0,0 +1,413 @@
---
project.description: Reliable delivery and flow control of messages between actors.
---
# Reliable delivery

For the Akka Classic documentation of this feature see @ref:[Classic At-Least-Once Delivery](../persistence.md#at-least-once-delivery).

@@@ warning

This module is currently marked as @ref:[may change](../common/may-change.md) because it is a new feature that
needs feedback from real usage before finalizing the API. This means that API or semantics can change without
warning or deprecation period. It is also not recommended to use this module in production just yet.

@@@

## Module info

To use reliable delivery, add the module to your project:

@@dependency[sbt,Maven,Gradle] {
  group=com.typesafe.akka
  artifact=akka-actor-typed_$scala.binary_version$
  version=$akka.version$
}

## Introduction

Normal @ref:[message delivery reliability](../general/message-delivery-reliability.md) is at-most once delivery, which
means that messages may be lost. That should be rare, but still possible.

For interactions between some actors that is not acceptable and at-least once delivery or effectively once processing
is needed. The tools for reliable delivery described here help with implementing that. It can't be achieved
automatically under the hood without collaboration from the application, because confirming when a message has been
fully processed is a business level concern. Only ensuring that it was transferred over the network or delivered to
the mailbox of the actor would not be enough, since the actor may crash right afterwards, before the message has been processed.

Lost messages are detected, resent and deduplicated as needed. In addition, flow control for the sending of
messages is included, so that a fast producer doesn't overwhelm a slower consumer or send messages at
a higher rate than what can be transferred over the network. This can be a common problem in interactions between
actors, resulting in fatal errors like `OutOfMemoryError` because too many messages are queued in the mailboxes
of the actors. The detection of lost messages and the flow control is driven by the consumer side, which means
that the producer side will not send faster than the demand requested by the consumer side. The producer side will
not push resends unless requested by the consumer side.

There are 3 supported patterns, which are described in the following sections:

* @ref:[Point-to-point](#point-to-point)
* @ref:[Work pulling](#work-pulling)
* @ref:[Sharding](#sharding)

## Point-to-point

Point-to-point reliable delivery between a single producer actor sending messages and a single consumer actor
receiving the messages.

Messages are sent from the producer to @apidoc[ProducerController] and via @apidoc[ConsumerController] actors, which
handle the delivery and confirmation of the processing in the destination consumer actor.

![delivery-p2p-1.png](images/delivery-p2p-1.png)

The producer actor will start the flow by sending a `ProducerController.Start` message to
the `ProducerController`.

The `ProducerController` sends `RequestNext` to the producer, which is then allowed to send one
message to the `ProducerController`. Thereafter the producer will receive a new `RequestNext`
when it's allowed to send one more message.

The producer and `ProducerController` actors are supposed to be local so that these messages are
fast and not lost. This is enforced by a runtime check.

Similarly, on the consumer side the destination consumer actor will start the flow by sending an
initial `ConsumerController.Start` message to the `ConsumerController`.

For the `ProducerController` to know where to send the messages it must be connected with the
`ConsumerController`. You do this with `ProducerController.RegisterConsumer` or
`ConsumerController.RegisterToProducerController` messages. When using the point-to-point pattern
it is the application's responsibility to connect them together. For example, by sending the `ActorRef`
in an ordinary message to the other side, or by registering the `ActorRef` in the @ref:[Receptionist](actor-discovery.md)
and finding it on the other side.

You must also take measures to reconnect them if either side crashes, for example by watching it
for termination.

Messages received from the producer are wrapped in `ConsumerController.Delivery` when sent to the consumer,
which is supposed to reply with `ConsumerController.Confirmed` when it has processed the message.
The next message is not delivered until the previous one is confirmed. More messages from the producer that arrive
while waiting for the confirmation are stashed by the `ConsumerController` and delivered when the previous
message is confirmed.

The consumer and the `ConsumerController` actors are supposed to be local so that these messages are fast
and not lost. This is enforced by a runtime check.

Many unconfirmed messages can be in flight between the `ProducerController` and `ConsumerController`, but
the number is limited by a flow control window. The flow control is driven by the consumer side, which means that
the `ProducerController` will not send faster than the demand requested by the `ConsumerController`.

### Point-to-point example

An example of a Fibonacci number generator (producer):

Scala
: @@snip [PointToPointDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala) { #imports #producer }

Java
: @@snip [PointToPointDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/PointToPointDocExample.java) { #imports #producer }
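
The referenced snippets are not reproduced inline here. As a rough, simplified sketch of the shape such a producer takes, sending plain `Long` Fibonacci values instead of the richer command protocol of the real example (all names here are illustrative, not the actual example code):

```scala
import akka.actor.typed.ActorRef
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.scaladsl.Behaviors

object FibonacciProducerSketch {
  sealed trait Command
  // wraps the RequestNext messages from the ProducerController into this actor's own protocol
  private final case class WrappedRequestNext(next: ProducerController.RequestNext[Long]) extends Command

  def apply(producerController: ActorRef[ProducerController.Command[Long]]): Behavior[Command] =
    Behaviors.setup { context =>
      val requestNextAdapter =
        context.messageAdapter[ProducerController.RequestNext[Long]](WrappedRequestNext(_))
      // start the flow; RequestNext messages will be sent to the adapter
      producerController ! ProducerController.Start(requestNextAdapter)
      fibonacci(0L, 1L)
    }

  private def fibonacci(a: Long, b: Long): Behavior[Command] =
    Behaviors.receiveMessage {
      case WrappedRequestNext(next) =>
        // one message may be sent for each received RequestNext
        next.sendNextTo ! a
        fibonacci(b, a + b)
    }
}
```

The real example follows the same `Start`/`RequestNext` pattern with its own message protocol.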

and the consumer of the Fibonacci numbers:

Scala
: @@snip [PointToPointDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala) { #consumer }

Java
: @@snip [PointToPointDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/PointToPointDocExample.java) { #consumer }
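
Again only as a sketch: the consumer registers with its `ConsumerController`, handles each `Delivery` and confirms it. The exact shape of `Delivery` and `Confirmed` (in particular the `confirmTo` field used below) is an assumption here and should be checked against the `ConsumerController` API:

```scala
import akka.actor.typed.ActorRef
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.scaladsl.Behaviors

object FibonacciConsumerSketch {
  sealed trait Command
  private final case class WrappedDelivery(delivery: ConsumerController.Delivery[Long]) extends Command

  def apply(consumerController: ActorRef[ConsumerController.Command[Long]]): Behavior[Command] =
    Behaviors.setup { context =>
      val deliveryAdapter =
        context.messageAdapter[ConsumerController.Delivery[Long]](WrappedDelivery(_))
      // start the flow; deliveries will be sent to the adapter
      consumerController ! ConsumerController.Start(deliveryAdapter)
      Behaviors.receiveMessage {
        case WrappedDelivery(delivery) =>
          context.log.info("Received Fibonacci number {}", delivery.message)
          // confirm so that the next message can be delivered
          // (`confirmTo` is an assumption about the Delivery field name)
          delivery.confirmTo ! ConsumerController.Confirmed
          Behaviors.same
      }
    }
}
```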

The `FibonacciProducer` sends the messages to a `ProducerController`. The `FibonacciConsumer` receives the messages
from a `ConsumerController`. Note how the `ActorRef`s in the `Start` messages are constructed as message adapters to map
the `RequestNext` and `Delivery` to the protocol of the producer and consumer actors respectively.

The `ConsumerController` and `ProducerController` are connected via the `ConsumerController.RegisterToProducerController`
message. The `ActorRef` of the `ProducerController` can be shared between producer and consumer sides with ordinary
messages, or by using the `Receptionist`. Alternatively, they can be connected in the other direction by sending
`ProducerController.RegisterConsumer` to the `ProducerController`.

Scala
: @@snip [PointToPointDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala) { #connect }

Java
: @@snip [PointToPointDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/PointToPointDocExample.java) { #connect }
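
For instance, a guardian that spawns the controllers and the two actors sketched above could connect them like this (the spawning structure is illustrative; only the `RegisterToProducerController` message is what actually connects the two sides):

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.scaladsl.Behaviors

object PointToPointGuardianSketch {
  def apply(): Behavior[Nothing] =
    Behaviors.setup[Nothing] { context =>
      // no durable queue in this sketch (None)
      val producerController =
        context.spawn(ProducerController[Long]("fibonacci", None), "producerController")
      context.spawn(FibonacciProducerSketch(producerController), "producer")

      val consumerController = context.spawn(ConsumerController[Long](), "consumerController")
      context.spawn(FibonacciConsumerSketch(consumerController), "consumer")

      // connect the two sides; the ConsumerController registers itself with the ProducerController
      consumerController ! ConsumerController.RegisterToProducerController(producerController)

      Behaviors.empty
    }
}
```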

### Point-to-point delivery semantics

As long as neither producer nor consumer crash, the messages are delivered to the consumer actor in the same order
as they were sent to the `ProducerController`, without loss or duplicates. Meaning effectively
once processing without any business level deduplication.

Unconfirmed messages may be lost if the producer crashes. To avoid that you need to enable the @ref:[durable
queue](#durable-producer) on the producer side. The stored unconfirmed messages will be redelivered when the
corresponding producer is started again. Even if the same `ConsumerController` instance is used, there may be
delivery of messages that had already been processed but for which the confirmation had not been stored yet.
Meaning at-least once delivery.

If the consumer crashes, a new `ConsumerController` can be connected to the original `ProducerController`
without restarting it. The `ProducerController` will then redeliver all unconfirmed messages. In that case
the unconfirmed messages will be delivered to the new consumer, and some of these may already have been
processed by the previous consumer.
Meaning at-least once delivery.

## Work pulling

Work pulling is a pattern where several worker actors pull tasks at their own pace from
a shared work manager, instead of the manager blindly pushing work to the workers
without knowing their individual capacity and current availability.

One important property is that the order of the messages should not matter, because each
message is routed randomly to one of the workers with demand. In other words, two subsequent
messages may be routed to two different workers and processed independently of each other.

Messages are sent from the producer to @apidoc[WorkPullingProducerController] and via @apidoc[ConsumerController]
actors, which handle the delivery and confirmation of the processing in the destination worker (consumer) actor.

![delivery-work-pulling-1.png](images/delivery-work-pulling-1.png)

and adding another worker

![delivery-work-pulling-2.png](images/delivery-work-pulling-2.png)

A worker actor (consumer) and its `ConsumerController` are dynamically registered to the
`WorkPullingProducerController` via a `ServiceKey`. It will register itself to the
@ref:[Receptionist](actor-discovery.md), and the `WorkPullingProducerController`
subscribes to the same key to find active workers. In this way workers can be dynamically
added or removed from any node in the cluster.

The work manager (producer) actor will start the flow by sending a `WorkPullingProducerController.Start`
message to the `WorkPullingProducerController`.

The `WorkPullingProducerController` sends `RequestNext` to the producer, which is then allowed
to send one message to the `WorkPullingProducerController`.
Thereafter the producer will receive a new `RequestNext` when it's allowed to send one more message.
The `WorkPullingProducerController` will send a new `RequestNext` when there is demand from any worker.
It's possible that all workers with demand are deregistered after the `RequestNext` is sent and before
the actual message is sent to the `WorkPullingProducerController`. In that case the message is
buffered and will be delivered when a new worker is registered or when there is new demand.

The producer and `WorkPullingProducerController` actors are supposed to be local so that these messages are
fast and not lost. This is enforced by a runtime check.

Similarly, on the consumer side the destination consumer actor will start the flow by sending an
initial `ConsumerController.Start` message to the `ConsumerController`.

Messages received from the producer are wrapped in `ConsumerController.Delivery` when sent to the consumer,
which is supposed to reply with `ConsumerController.Confirmed` when it has processed the message.
The next message is not delivered until the previous one is confirmed. More messages from the producer that arrive
while waiting for the confirmation are stashed by the `ConsumerController` and delivered when the previous
message is confirmed.

The consumer and the `ConsumerController` actors are supposed to be local so that these messages are fast
and not lost. This is enforced by a runtime check.

Many unconfirmed messages can be in flight between the `WorkPullingProducerController` and each
`ConsumerController`, but the number is limited by a flow control window. The flow control is driven by the
consumer side, which means that the `WorkPullingProducerController` will not send faster than the
demand requested by the workers.

### Work pulling example

An example of an image converter worker (consumer):

Scala
: @@snip [WorkPullingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala) { #imports #consumer }

Java
: @@snip [WorkPullingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/WorkPullingDocExample.java) { #imports #consumer }

and the image converter job manager (producer):

Scala
: @@snip [WorkPullingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala) { #producer }

Java
: @@snip [WorkPullingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/WorkPullingDocExample.java) { #producer }
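
Since the snippets are not inlined here, a rough sketch of how the two sides are wired via a `ServiceKey`. The job type is hypothetical and the factory signatures, in particular the parameter order of `WorkPullingProducerController`, are assumptions based on the description above:

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.delivery.WorkPullingProducerController
import akka.actor.typed.receptionist.ServiceKey
import akka.actor.typed.scaladsl.Behaviors

object WorkPullingSketch {
  // hypothetical job type for the image converter example
  final case class ConversionJob(fromFormat: String, toFormat: String, image: Array[Byte])

  // the ServiceKey that both sides agree on
  val serviceKey: ServiceKey[ConsumerController.Command[ConversionJob]] =
    ServiceKey[ConsumerController.Command[ConversionJob]]("image-converter")

  def guardian(): Behavior[Nothing] =
    Behaviors.setup[Nothing] { context =>
      // worker side: the ConsumerController registers itself with the Receptionist via the key
      context.spawn(ConsumerController[ConversionJob](serviceKey), "workerController")

      // work manager side: subscribes to the same key to find active workers
      context.spawn(
        // (producerId, workerServiceKey, no durable queue); parameter order is an assumption
        WorkPullingProducerController("image-work-manager", serviceKey, None),
        "producerController")

      // the real example then starts a worker actor and a job manager actor
      // that talk to these controllers, as shown in the snippets above
      Behaviors.empty
    }
}
```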

Note how the `ActorRef`s in the `Start` messages are constructed as message adapters to map the
`RequestNext` and `Delivery` to the protocol of the producer and consumer actors respectively.

See also the corresponding @ref:[example that is using ask from the producer](#ask-from-the-producer).

### Work pulling delivery semantics

For work pulling the order of the messages should not matter, because each message is routed randomly
to one of the workers with demand and can therefore be processed in any order.

As long as neither producers nor workers crash (and workers are not removed for other reasons), the messages are
delivered to the workers without loss or duplicates. Meaning effectively once processing without any
business level deduplication.

Unconfirmed messages may be lost if the producer crashes. To avoid that you need to enable the @ref:[durable
queue](#durable-producer) on the producer side. The stored unconfirmed messages will be redelivered when the
corresponding producer is started again. Those messages may be routed to different workers than before
and some of them may have already been processed but the fact that they were confirmed had not been stored
yet. Meaning at-least once delivery.

If a worker crashes or is stopped gracefully, the unconfirmed messages will be redelivered to other workers.
In that case some of these may already have been processed by the previous worker. Meaning at-least once delivery.

## Sharding

To use reliable delivery with Cluster Sharding, add the following module to your project:

@@dependency[sbt,Maven,Gradle] {
  group=com.typesafe.akka
  artifact=akka-cluster-sharding-typed_$scala.binary_version$
  version=$akka.version$
}

Reliable delivery between a producer actor sending messages to a @ref:[sharded](cluster-sharding.md) consumer
actor receiving the messages.

![delivery-sharding-1.png](images/delivery-sharding-1.png)

and sending to another entity

![delivery-sharding-2.png](images/delivery-sharding-2.png)

and sending from another producer (different node)

![delivery-sharding-3.png](images/delivery-sharding-3.png)

The @apidoc[ShardingProducerController] should be used together with @apidoc[ShardingConsumerController].

A producer can send messages via a `ShardingProducerController` to any `ShardingConsumerController`
identified by an `entityId`. A single `ShardingProducerController` per `ActorSystem` (node) can be
shared for sending to all entities of a certain entity type. No explicit registration is needed
between the `ShardingConsumerController` and `ShardingProducerController`.

The producer actor will start the flow by sending a `ShardingProducerController.Start`
message to the `ShardingProducerController`.

The `ShardingProducerController` sends `RequestNext` to the producer, which is then allowed
to send one message to the `ShardingProducerController`. Thereafter the producer will receive a
new `RequestNext` when it's allowed to send one more message.

In the @apidoc[ShardingProducerController.RequestNext] message there is information about which entities
have demand. It is allowed to send to a new `entityId` that is not included in the `RequestNext.entitiesWithDemand`.
If sending to an entity that doesn't have demand the message will be buffered. This support for buffering
means that it is even allowed to send several messages in response to one `RequestNext`, but it's recommended to
only send one message and wait for the next `RequestNext` before sending more messages.
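
A small sketch of how a producer might use that information. The entity command type and the pending-work bookkeeping are hypothetical, and treating `sendNextTo` as an `ActorRef[ShardingEnvelope[...]]` is an assumption about the `RequestNext` API:

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.typed.ShardingEnvelope
import akka.cluster.sharding.typed.delivery.ShardingProducerController

object TodoServiceSketch {
  // hypothetical entity command
  final case class AddTask(item: String)

  // this actor's ref would be passed in ShardingProducerController.Start;
  // `pending` is the work this producer still wants to send, per entityId
  def active(pending: Map[String, Vector[AddTask]]): Behavior[ShardingProducerController.RequestNext[AddTask]] =
    Behaviors.receiveMessage { next =>
      // prefer an entity that already has demand; sending to others is allowed but will be buffered
      val candidates = next.entitiesWithDemand.filter(id => pending.get(id).exists(_.nonEmpty))
      candidates.headOption match {
        case Some(entityId) =>
          val task = pending(entityId).head
          // sendNextTo as an ActorRef[ShardingEnvelope[AddTask]] is an assumption here
          next.sendNextTo ! ShardingEnvelope(entityId, task)
          active(pending.updated(entityId, pending(entityId).tail))
        case None =>
          Behaviors.same
      }
    }
}
```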

The producer and `ShardingProducerController` actors are supposed to be local so that these messages are
fast and not lost. This is enforced by a runtime check.

Similarly, on the consumer side the destination consumer actor will start the flow by sending an
initial `ConsumerController.Start` message to the `ConsumerController`.

There will be one `ShardingConsumerController` for each entity. Many unconfirmed messages can be in
flight between the `ShardingProducerController` and each `ShardingConsumerController`, but the number is
limited by a flow control window. The flow control is driven by the consumer side, which means that
the `ShardingProducerController` will not send faster than the demand requested by the consumers.

### Sharding example

The sharded entity is a todo list which uses an async database call to store its entire state on each change,
and only when that call has completed does it confirm to reliable delivery that the message was consumed.

An example of the `TodoList` entity (consumer):

Scala
: @@snip [ShardingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala) { #imports #consumer }

Java
: @@snip [ShardingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/ShardingDocExample.java) { #imports #consumer }

and the `TodoService` (producer):

Scala
: @@snip [ShardingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala) { #producer }

Java
: @@snip [ShardingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/ShardingDocExample.java) { #producer }

Note how the `ActorRef`s in the `Start` messages are constructed as message adapters to map the
`RequestNext` and `Delivery` to the protocol of the producer and consumer actors respectively.

Those are initialized with sharding like this (from the guardian):

Scala
: @@snip [ShardingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala) { #init }

Java
: @@snip [ShardingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/ShardingDocExample.java) { #init }

### Sharding delivery semantics

As long as neither producer nor consumer crash, the messages are delivered to the consumer actor in the same order
as they were sent to the `ShardingProducerController`, without loss or duplicates. Meaning effectively once
processing without any business level deduplication.

Unconfirmed messages may be lost if the producer crashes. To avoid that you need to enable the @ref:[durable
queue](#durable-producer) on the producer side. The stored unconfirmed messages will be redelivered when the
corresponding producer is started again. In that case there may be delivery of messages that had already been
processed but the fact that they were confirmed had not been stored yet. Meaning at-least once delivery.

If the consumer crashes or the shard is rebalanced, the unconfirmed messages will be redelivered. In that case
some of these may already have been processed by the previous consumer.

## Durable producer

Until sent messages have been confirmed, the producer side keeps them in memory to be able to
resend them. If the JVM of the producer side crashes, those unconfirmed messages are lost.
To make sure the messages can be delivered also in that scenario, a @apidoc[DurableProducerQueue] can be used.
Then the unconfirmed messages are stored in a durable way so that they can be redelivered when the producer
is started again. An implementation of the `DurableProducerQueue` is provided by @apidoc[EventSourcedProducerQueue]
in `akka-persistence-typed`.

Be aware that a `DurableProducerQueue` will add a substantial performance overhead.

When using the `EventSourcedProducerQueue` the following dependency is needed:

@@dependency[sbt,Maven,Gradle] {
  group=com.typesafe.akka
  artifact=akka-persistence-typed_$scala.binary_version$
  version=$akka.version$
}

You also have to select a journal plugin and a snapshot store plugin, see
@ref:[Persistence Plugins](../persistence-plugins.md).

An example of the image converter work manager from the @ref:[Work pulling example](#work-pulling-example) with
`EventSourcedProducerQueue` enabled:

Scala
: @@snip [WorkPullingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala) { #durable-queue }

Java
: @@snip [WorkPullingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/WorkPullingDocExample.java) { #durable-queue }
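
The essential change is passing an `EventSourcedProducerQueue` behavior when the controller is created. Sketched here with a plain `ProducerController` and `String` messages to keep it small; the work pulling and sharding variants accept an optional durable queue in a similar way:

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ProducerController
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.delivery.EventSourcedProducerQueue

object DurableProducerSketch {
  // String is used as the message type just to keep the sketch small
  def producerController(producerId: String): Behavior[ProducerController.Command[String]] = {
    // unconfirmed messages are stored by this event sourced queue and are
    // redelivered when a producer with the same PersistenceId is started again
    val durableQueue = EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId))
    ProducerController[String](producerId, Some(durableQueue))
  }
}
```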

It's important to note that the `EventSourcedProducerQueue` requires a @ref:[PersistenceId](persistence.md#persistenceid),
which must be unique. The same `PersistenceId` must not be used for different producers at the same time.
A @ref:[Cluster Singleton](cluster-singleton.md) hosting the producer would satisfy that requirement,
or one producer per node and a naming scheme that ensures that different nodes use different `PersistenceId`s.

To deliver unconfirmed messages after a crash, the producer must be started again with the same `PersistenceId`
as before the crash.

## Ask from the producer

Instead of using `tell` with the `sendNextTo` in the `RequestNext`, the producer can use `context.ask`
with the `askNextTo` in the `RequestNext`. The difference is that a reply is sent back when the
message has been handled. To include the `replyTo` `ActorRef`, the message must be wrapped in a
`MessageWithConfirmation`. If a `DurableProducerQueue` is used, then the reply is sent when the message
has been stored successfully, but it might not have been processed by the consumer yet. Otherwise the
reply is sent after the consumer has processed and confirmed the message.

An example of using `ask` in the image converter work manager from the @ref:[Work pulling example](#work-pulling-example):

Scala
: @@snip [WorkPullingDocExample.scala](/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala) { #ask }

Java
: @@snip [WorkPullingDocExample.java](/akka-cluster-sharding-typed/src/test/java/jdocs/delivery/WorkPullingDocExample.java) { #ask }
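
As a sketch of that pattern, with a plain `ProducerController` and `String` jobs rather than the work pulling variant of the real example. The assumption here is that the confirmation reply carries the assigned sequence number as a `Long`:

```scala
import scala.concurrent.duration._
import scala.util.{ Failure, Success }

import akka.actor.typed.Behavior
import akka.actor.typed.delivery.ProducerController
import akka.actor.typed.scaladsl.Behaviors
import akka.util.Timeout

object AskProducerSketch {
  sealed trait Command
  final case class WrappedRequestNext(next: ProducerController.RequestNext[String]) extends Command
  final case class Confirmed(seqNr: Long) extends Command
  final case class AskReplyTimeout(job: String) extends Command

  def apply(): Behavior[Command] = Behaviors.setup { context =>
    implicit val askTimeout: Timeout = 5.seconds
    Behaviors.receiveMessage {
      case WrappedRequestNext(next) =>
        val job = "convert-42" // whatever the next piece of work is
        // askNextTo expects the message wrapped in MessageWithConfirmation,
        // so that the controller knows where to send the confirmation
        context.ask[ProducerController.MessageWithConfirmation[String], Long](
          next.askNextTo,
          askReplyTo => ProducerController.MessageWithConfirmation(job, askReplyTo)) {
          case Success(seqNr) => Confirmed(seqNr)
          case Failure(_)     => AskReplyTimeout(job)
        }
        Behaviors.same
      case Confirmed(seqNr) =>
        context.log.info("Message with seqNr {} was confirmed", seqNr)
        Behaviors.same
      case AskReplyTimeout(job) =>
        context.log.warn("Confirmation of {} timed out", job)
        Behaviors.same
    }
  }
}
```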

## Only flow control

It's possible to use this feature without resending lost messages, while still using the flow control. This can
for example be useful when both consumer and producer are known to be located in the same local `ActorSystem`.
This can be more efficient since messages don't have to be kept in memory in the `ProducerController` until
they have been confirmed, but the drawback is that lost messages will not be delivered. See the configuration property
`only-flow-control` of the `ConsumerController`.
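
For example, assuming the setting lives next to the other consumer-controller properties such as `flow-control-window` (the full path below is an assumption; check the reference configuration):

```scala
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import com.typesafe.config.ConfigFactory

object OnlyFlowControlSketch {
  // path assumed from the description above and the reference configuration layout
  private val config = ConfigFactory.parseString("""
    akka.reliable-delivery.consumer-controller.only-flow-control = on
    """)

  def main(args: Array[String]): Unit = {
    val system = ActorSystem(Behaviors.empty[String], "OnlyFlowControlExample", config)
    system.terminate()
  }
}
```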

## Configuration

There are several configuration properties; please refer to the `akka.reliable-delivery` config section in the
reference configuration:

* @ref:[akka-actor-typed reference configuration](../general/configuration-reference.md#config-akka-actor-typed)
* @ref:[akka-persistence-typed reference configuration](../general/configuration-reference.md#config-akka-persistence-typed)
* @ref:[akka-cluster-sharding-typed reference configuration](../general/configuration-reference.md#config-cluster-sharding-typed)

@@ -23,3 +23,32 @@ akka.persistence.typed {
  log-stashing = off
}

akka.reliable-delivery {
  producer-controller {
    event-sourced-durable-queue {
      # Max duration for the exponential backoff for persist failures.
      restart-max-backoff = 10s

      # Snapshot after this number of events. See RetentionCriteria.
      snapshot-every = 1000

      # Number of snapshots to keep. See RetentionCriteria.
      keep-n-snapshots = 2

      # Delete events after snapshotting. See RetentionCriteria.
      delete-events = on

      # Cleanup entries that haven't been used for this duration.
      cleanup-unused-after = 3600s

      # The journal plugin to use, by default it will use the plugin configured by
      # `akka.persistence.journal.plugin`.
      journal-plugin-id = ""

      # The snapshot store plugin to use, by default it will use the plugin configured by
      # `akka.persistence.snapshot-store.plugin`.
      snapshot-plugin-id = ""
    }
  }
}
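
The same values can also be overridden programmatically via `EventSourcedProducerQueue.Settings` from the new API below and passed to the queue factory, for example (names are illustrative):

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.delivery.DurableProducerQueue
import akka.actor.typed.scaladsl.Behaviors
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.delivery.EventSourcedProducerQueue

object TunedDurableQueueSketch {
  def apply(producerId: String): Behavior[DurableProducerQueue.Command[String]] =
    Behaviors.setup { context =>
      // start from the configuration above and override selected values in code
      val settings = EventSourcedProducerQueue
        .Settings(context.system)
        .withSnapshotEvery(100)
        .withKeepNSnapshots(3)
      EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId), settings)
    }
}
```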

@@ -0,0 +1,290 @@
/*
 * Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.persistence.typed.delivery

import java.time.{ Duration => JavaDuration }

import scala.concurrent.duration._

import akka.util.JavaDurationConverters._
import akka.actor.typed.ActorSystem
import akka.actor.typed.Behavior
import akka.actor.typed.SupervisorStrategy
import akka.actor.typed.delivery.DurableProducerQueue
import akka.actor.typed.scaladsl.ActorContext
import akka.actor.typed.scaladsl.Behaviors
import akka.annotation.ApiMayChange
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.delivery.EventSourcedProducerQueue.CleanupTick
import akka.persistence.typed.scaladsl.Effect
import akka.persistence.typed.scaladsl.EventSourcedBehavior
import akka.persistence.typed.scaladsl.RetentionCriteria
import com.typesafe.config.Config

/**
 * [[DurableProducerQueue]] that can be used with [[akka.actor.typed.delivery.ProducerController]]
 * for reliable delivery of messages. It is implemented with event sourcing and stores one
 * event before sending the message to the destination and one event for the confirmation
 * that the message has been delivered and processed.
 *
 * The [[DurableProducerQueue.LoadState]] request is used at startup to retrieve the unconfirmed messages.
 */
@ApiMayChange
object EventSourcedProducerQueue {
  import DurableProducerQueue._

  object Settings {

    /**
     * Scala API: Factory method from config `akka.reliable-delivery.producer-controller.event-sourced-durable-queue`
     * of the `ActorSystem`.
     */
    def apply(system: ActorSystem[_]): Settings =
      apply(system.settings.config.getConfig("akka.reliable-delivery.producer-controller.event-sourced-durable-queue"))

    /**
     * Scala API: Factory method from Config corresponding to
     * `akka.reliable-delivery.producer-controller.event-sourced-durable-queue`.
     */
    def apply(config: Config): Settings = {
      new Settings(
        restartMaxBackoff = config.getDuration("restart-max-backoff").asScala,
        snapshotEvery = config.getInt("snapshot-every"),
        keepNSnapshots = config.getInt("keep-n-snapshots"),
        deleteEvents = config.getBoolean("delete-events"),
        cleanupUnusedAfter = config.getDuration("cleanup-unused-after").asScala,
        journalPluginId = config.getString("journal-plugin-id"),
        snapshotPluginId = config.getString("snapshot-plugin-id"))
    }

    /**
     * Java API: Factory method from config `akka.reliable-delivery.producer-controller.event-sourced-durable-queue`
     * of the `ActorSystem`.
     */
    def create(system: ActorSystem[_]): Settings =
      apply(system)

    /**
     * Java API: Factory method from Config corresponding to
     * `akka.reliable-delivery.producer-controller.event-sourced-durable-queue`.
     */
    def create(config: Config): Settings =
      apply(config)
  }

  final class Settings private (
      val restartMaxBackoff: FiniteDuration,
      val snapshotEvery: Int,
      val keepNSnapshots: Int,
      val deleteEvents: Boolean,
      val cleanupUnusedAfter: FiniteDuration,
      val journalPluginId: String,
      val snapshotPluginId: String) {

    def withSnapshotEvery(newSnapshotEvery: Int): Settings =
      copy(snapshotEvery = newSnapshotEvery)

    def withKeepNSnapshots(newKeepNSnapshots: Int): Settings =
      copy(keepNSnapshots = newKeepNSnapshots)

    def withDeleteEvents(newDeleteEvents: Boolean): Settings =
      copy(deleteEvents = newDeleteEvents)

    /**
     * Scala API
     */
    def withRestartMaxBackoff(newRestartMaxBackoff: FiniteDuration): Settings =
      copy(restartMaxBackoff = newRestartMaxBackoff)

    /**
     * Java API
     */
    def withRestartMaxBackoff(newRestartMaxBackoff: JavaDuration): Settings =
      copy(restartMaxBackoff = newRestartMaxBackoff.asScala)

    /**
     * Java API
     */
    def getRestartMaxBackoff(): JavaDuration =
      restartMaxBackoff.asJava

    /**
     * Scala API
     */
    def withCleanupUnusedAfter(newCleanupUnusedAfter: FiniteDuration): Settings =
      copy(cleanupUnusedAfter = newCleanupUnusedAfter)

    /**
     * Java API
     */
    def withCleanupUnusedAfter(newCleanupUnusedAfter: JavaDuration): Settings =
      copy(cleanupUnusedAfter = newCleanupUnusedAfter.asScala)

    /**
     * Java API
     */
    def getCleanupUnusedAfter(): JavaDuration =
      cleanupUnusedAfter.asJava

    def withJournalPluginId(id: String): Settings =
      copy(journalPluginId = id)

    def withSnapshotPluginId(id: String): Settings =
      copy(snapshotPluginId = id)

    /**
     * Private copy method for internal use only.
     */
    private def copy(
        restartMaxBackoff: FiniteDuration = restartMaxBackoff,
        snapshotEvery: Int = snapshotEvery,
        keepNSnapshots: Int = keepNSnapshots,
        deleteEvents: Boolean = deleteEvents,
        cleanupUnusedAfter: FiniteDuration = cleanupUnusedAfter,
        journalPluginId: String = journalPluginId,
        snapshotPluginId: String = snapshotPluginId) =
      new Settings(
        restartMaxBackoff,
        snapshotEvery,
        keepNSnapshots,
        deleteEvents,
        cleanupUnusedAfter,
        journalPluginId,
        snapshotPluginId)

    override def toString: String =
      s"Settings($restartMaxBackoff,$snapshotEvery,$keepNSnapshots,$deleteEvents,$cleanupUnusedAfter,$journalPluginId,$snapshotPluginId)"
  }

  private case class CleanupTick[A]() extends DurableProducerQueue.Command[A]

  def apply[A](persistenceId: PersistenceId): Behavior[DurableProducerQueue.Command[A]] = {
    Behaviors.setup { context =>
      apply(persistenceId, Settings(context.system))
    }
  }

  def apply[A](persistenceId: PersistenceId, settings: Settings): Behavior[DurableProducerQueue.Command[A]] = {
    Behaviors.setup { context =>
      context.setLoggerName(classOf[EventSourcedProducerQueue[A]])
      val impl = new EventSourcedProducerQueue[A](context, settings.cleanupUnusedAfter)

      Behaviors.withTimers { timers =>
        // for sharding there can be many different confirmation qualifiers, and this
        // cleanup task removes qualifiers from `state.confirmedSeqNr` that have not been used for a while
        context.self ! CleanupTick[A]()
        timers.startTimerWithFixedDelay(CleanupTick[A](), settings.cleanupUnusedAfter / 2)

        val retentionCriteria = RetentionCriteria.snapshotEvery(
          numberOfEvents = settings.snapshotEvery,
          keepNSnapshots = settings.keepNSnapshots)
        val retentionCriteria2 =
          if (settings.deleteEvents) retentionCriteria.withDeleteEventsOnSnapshot else retentionCriteria

        EventSourcedBehavior[Command[A], Event, State[A]](
          persistenceId,
          State.empty,
          (state, command) => impl.onCommand(state, command),
          (state, event) => impl.onEvent(state, event))
          .withRetention(retentionCriteria2)
          .withJournalPluginId(settings.journalPluginId)
          .withSnapshotPluginId(settings.snapshotPluginId)
          .onPersistFailure(SupervisorStrategy
            .restartWithBackoff(1.second.min(settings.restartMaxBackoff), settings.restartMaxBackoff, 0.1))
      }
    }
  }

  /**
   * Java API
   */
  def create[A](persistenceId: PersistenceId): Behavior[DurableProducerQueue.Command[A]] =
    apply(persistenceId)

  /**
   * Java API
   */
  def create[A](persistenceId: PersistenceId, settings: Settings): Behavior[DurableProducerQueue.Command[A]] =
    apply(persistenceId, settings)

}

/**
 * INTERNAL API
 */
private class EventSourcedProducerQueue[A](
    context: ActorContext[DurableProducerQueue.Command[A]],
    cleanupUnusedAfter: FiniteDuration) {
  import DurableProducerQueue._

  def onCommand(state: State[A], command: Command[A]): Effect[Event, State[A]] = {
    command match {
      case StoreMessageSent(sent, replyTo) =>
        if (sent.seqNr == state.currentSeqNr) {
          context.log.trace(
            "StoreMessageSent seqNr [{}], confirmationQualifier [{}]",
            sent.seqNr,
            sent.confirmationQualifier)
          Effect.persist(sent).thenReply(replyTo)(_ => StoreMessageSentAck(sent.seqNr))
        } else if (sent.seqNr == state.currentSeqNr - 1) {
          // already stored, could be a retry after timeout
          context.log.debug("Duplicate seqNr [{}], currentSeqNr [{}]", sent.seqNr, state.currentSeqNr)
          Effect.reply(replyTo)(StoreMessageSentAck(sent.seqNr))
        } else {
          // may happen after failure
          context.log.debug("Ignoring unexpected seqNr [{}], currentSeqNr [{}]", sent.seqNr, state.currentSeqNr)
          Effect.unhandled // no reply, request will timeout
        }

      case StoreMessageConfirmed(seqNr, confirmationQualifier, timestampMillis) =>
        context.log.trace("StoreMessageConfirmed seqNr [{}], confirmationQualifier [{}]", seqNr, confirmationQualifier)
        val previousConfirmedSeqNr = state.confirmedSeqNr.get(confirmationQualifier) match {
          case Some((nr, _)) => nr
          case None          => 0L
        }
        if (seqNr > previousConfirmedSeqNr)
          Effect.persist(Confirmed(seqNr, confirmationQualifier, timestampMillis))
        else
          Effect.none // duplicate

      case LoadState(replyTo) =>
        Effect.reply(replyTo)(state)

      case _: CleanupTick[_] =>
        val now = System.currentTimeMillis()
        val old = state.confirmedSeqNr.collect {
          case (confirmationQualifier, (_, timestampMillis))
              if (now - timestampMillis) >= cleanupUnusedAfter.toMillis && !state.unconfirmed.exists(
                _.confirmationQualifier != confirmationQualifier) =>
            confirmationQualifier
        }.toSet
        if (old.isEmpty) {
          Effect.none
        } else {
          if (context.log.isDebugEnabled)
            context.log.debug("Cleanup [{}]", old.mkString(","))
          Effect.persist(DurableProducerQueue.Cleanup(old))
        }
    }
  }

  def onEvent(state: State[A], event: Event): State[A] = {
    event match {
      case sent: MessageSent[A] @unchecked =>
        state.copy(currentSeqNr = sent.seqNr + 1, unconfirmed = state.unconfirmed :+ sent)
      case Confirmed(seqNr, confirmationQualifier, timestampMillis) =>
        val newUnconfirmed = state.unconfirmed.filterNot { u =>
          u.confirmationQualifier == confirmationQualifier && u.seqNr <= seqNr
        }
        state.copy(
          highestConfirmedSeqNr = math.max(state.highestConfirmedSeqNr, seqNr),
          confirmedSeqNr = state.confirmedSeqNr.updated(confirmationQualifier, (seqNr, timestampMillis)),
          unconfirmed = newUnconfirmed)
      case Cleanup(confirmationQualifiers) =>
        state.copy(confirmedSeqNr = state.confirmedSeqNr -- confirmationQualifiers)
    }
  }

}

@@ -4,9 +4,6 @@
  <statusListener class="ch.qos.logback.core.status.NopStatusListener" />

  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>INFO</level>
    </filter>
    <encoder>
      <pattern>%date{ISO8601} %-5level %logger %marker - %msg {%mdc}%n</pattern>
    </encoder>


@@ -2,14 +2,17 @@
 * Copyright (C) 2017-2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.cluster.typed
package akka.persistence.typed

import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import akka.actor.typed.{ ActorRef, Behavior }
import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }
import akka.actor.testkit.typed.scaladsl.TestProbe
import akka.actor.testkit.typed.scaladsl.LogCapturing
import akka.persistence.typed.PersistenceId
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import akka.actor.testkit.typed.scaladsl.TestProbe
import akka.actor.typed.ActorRef
import akka.actor.typed.Behavior
import akka.cluster.typed.ClusterSingleton
import akka.cluster.typed.SingletonActor
import akka.persistence.typed.scaladsl.Effect
import akka.persistence.typed.scaladsl.EventSourcedBehavior
import com.typesafe.config.ConfigFactory
import org.scalatest.wordspec.AnyWordSpecLike

@ -0,0 +1,284 @@
|
|||
/*
|
||||
* Copyright (C) 2017-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.persistence.typed.delivery
|
||||
|
||||
import java.util.UUID
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.testkit.typed.scaladsl._
|
||||
import akka.actor.typed.eventstream.EventStream
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.Confirmed
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.LoadState
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.MessageSent
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.NoQualifier
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.State
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.StoreMessageConfirmed
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.StoreMessageSent
|
||||
import akka.actor.typed.delivery.DurableProducerQueue.StoreMessageSentAck
|
||||
import akka.persistence.journal.inmem.InmemJournal
|
||||
import akka.persistence.typed.PersistenceId
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
object EventSourcedProducerQueueSpec {
|
||||
def conf: Config =
|
||||
ConfigFactory.parseString(s"""
|
||||
akka.persistence.journal.plugin = "akka.persistence.journal.inmem"
|
||||
akka.persistence.journal.inmem.test-serialization = on
|
||||
akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
|
||||
akka.persistence.snapshot-store.local.dir = "target/EventSourcedDurableProducerQueueSpec-${UUID
|
||||
.randomUUID()
|
||||
.toString}"
|
||||
""")
|
||||
}
|
||||
|
||||
class EventSourcedProducerQueueSpec
|
||||
extends ScalaTestWithActorTestKit(ReliableDeliveryWithEventSourcedProducerQueueSpec.conf)
|
||||
with AnyWordSpecLike
|
||||
with LogCapturing {
|
||||
|
||||
private val pidCounter = new AtomicInteger(0)
|
||||
private def nextPid(): PersistenceId = PersistenceId.ofUniqueId(s"pid-${pidCounter.incrementAndGet()})")
|
||||
|
||||
private val journalOperations = createTestProbe[InmemJournal.Operation]()
|
||||
system.eventStream ! EventStream.Subscribe(journalOperations.ref)
|
||||
|
||||
private val stateProbe = createTestProbe[State[String]]()
|
||||
|
||||
"EventSourcedDurableProducerQueue" must {
|
||||
|
||||
"persist MessageSent" in {
|
||||
val pid = nextPid()
|
||||
val ackProbe = createTestProbe[StoreMessageSentAck]()
|
||||
val queue = spawn(EventSourcedProducerQueue[String](pid))
|
||||
val timestamp = System.currentTimeMillis()
|
||||
|
||||
val msg1 = MessageSent(seqNr = 1, "a", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg1, ackProbe.ref)
|
||||
ackProbe.expectMessage(StoreMessageSentAck(storedSeqNr = 1))
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg1, pid.id, 1))
|
||||
|
||||
val msg2 = MessageSent(seqNr = 2, "b", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg2, ackProbe.ref)
|
||||
ackProbe.expectMessage(StoreMessageSentAck(storedSeqNr = 2))
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg2, pid.id, 2))
|
||||
|
||||
queue ! LoadState(stateProbe.ref)
|
||||
val expectedState =
|
||||
State(currentSeqNr = 3, highestConfirmedSeqNr = 0, confirmedSeqNr = Map.empty, unconfirmed = Vector(msg1, msg2))
|
||||
stateProbe.expectMessage(expectedState)
|
||||
|
||||
// replay
|
||||
testKit.stop(queue)
|
||||
val queue2 = spawn(EventSourcedProducerQueue[String](pid))
|
||||
queue2 ! LoadState(stateProbe.ref)
|
||||
stateProbe.expectMessage(expectedState)
|
||||
}
|
||||
|
||||
"not persist MessageSent if lower seqNr than already stored" in {
|
||||
val pid = nextPid()
|
||||
val ackProbe = createTestProbe[StoreMessageSentAck]()
|
||||
val queue = spawn(EventSourcedProducerQueue[String](pid))
|
||||
val timestamp = System.currentTimeMillis()
|
||||
|
||||
val msg1 = MessageSent(seqNr = 1, "a", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg1, ackProbe.ref)
|
||||
ackProbe.expectMessage(StoreMessageSentAck(storedSeqNr = 1))
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg1, pid.id, 1))
|
||||
|
||||
val msg2 = MessageSent(seqNr = 2, "b", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg2, ackProbe.ref)
|
||||
ackProbe.expectMessage(StoreMessageSentAck(storedSeqNr = 2))
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg2, pid.id, 2))
|
||||
|
||||
// duplicate msg2
|
||||
queue ! StoreMessageSent(msg2, ackProbe.ref)
|
||||
ackProbe.expectMessage(StoreMessageSentAck(storedSeqNr = 2))
|
||||
journalOperations.expectNoMessage()
|
||||
|
||||
// further back is ignored
|
||||
queue ! StoreMessageSent(msg1, ackProbe.ref)
|
||||
ackProbe.expectNoMessage()
|
||||
journalOperations.expectNoMessage()
|
||||
}
|
||||
|
||||
"persist Confirmed" in {
|
||||
val pid = nextPid()
|
||||
val ackProbe = createTestProbe[StoreMessageSentAck]()
|
||||
val queue = spawn(EventSourcedProducerQueue[String](pid))
|
||||
val timestamp = System.currentTimeMillis()
|
||||
|
||||
val msg1 = MessageSent(seqNr = 1, "a", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg1, ackProbe.ref)
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg1, pid.id, 1))
|
||||
|
||||
val msg2 = MessageSent(seqNr = 2, "b", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg2, ackProbe.ref)
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg2, pid.id, 2))
|
||||
|
||||
val msg3 = MessageSent(seqNr = 3, "c", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg3, ackProbe.ref)
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg3, pid.id, 3))
|
||||
|
||||
val timestamp2 = System.currentTimeMillis()
|
||||
queue ! StoreMessageConfirmed(seqNr = 2, NoQualifier, timestamp2)
|
||||
journalOperations.expectMessage(InmemJournal.Write(Confirmed(seqNr = 2, NoQualifier, timestamp2), pid.id, 4))
|
||||
|
||||
queue ! LoadState(stateProbe.ref)
|
||||
// note that msg1 is also confirmed (removed) by the confirmation of msg2
|
||||
val expectedState =
|
||||
State(
|
||||
currentSeqNr = 4,
|
||||
highestConfirmedSeqNr = 2,
|
||||
confirmedSeqNr = Map(NoQualifier -> (2L -> timestamp2)),
|
||||
unconfirmed = Vector(msg3))
|
||||
stateProbe.expectMessage(expectedState)
|
||||
|
||||
// replay
|
||||
testKit.stop(queue)
|
||||
val queue2 = spawn(EventSourcedProducerQueue[String](pid))
|
||||
queue2 ! LoadState(stateProbe.ref)
|
||||
stateProbe.expectMessage(expectedState)
|
||||
}
|
||||
|
||||
"not persist Confirmed with lower seqNr than already confirmed" in {
|
||||
val pid = nextPid()
|
||||
val ackProbe = createTestProbe[StoreMessageSentAck]()
|
||||
val queue = spawn(EventSourcedProducerQueue[String](pid))
|
||||
val timestamp = System.currentTimeMillis()
|
||||
|
||||
val msg1 = MessageSent(seqNr = 1, "a", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg1, ackProbe.ref)
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg1, pid.id, 1))
|
||||
|
||||
val msg2 = MessageSent(seqNr = 2, "b", ack = true, NoQualifier, timestamp)
|
||||
queue ! StoreMessageSent(msg2, ackProbe.ref)
|
||||
journalOperations.expectMessage(InmemJournal.Write(msg2, pid.id, 2))
|
||||
|
||||
val timestamp2 = System.currentTimeMillis()
|
||||
queue ! StoreMessageConfirmed(seqNr = 2, NoQualifier, timestamp2)
|
||||
journalOperations.expectMessage(InmemJournal.Write(Confirmed(seqNr = 2, NoQualifier, timestamp2), pid.id, 3))
|
||||
|
||||
// lower
|
||||
      queue ! StoreMessageConfirmed(seqNr = 1, NoQualifier, timestamp2)
      journalOperations.expectNoMessage()

      // duplicate
      queue ! StoreMessageConfirmed(seqNr = 2, NoQualifier, timestamp2)
      journalOperations.expectNoMessage()
    }

    "keep track of confirmations per confirmationQualifier" in {
      val pid = nextPid()
      val ackProbe = createTestProbe[StoreMessageSentAck]()
      val queue = spawn(EventSourcedProducerQueue[String](pid))
      val timestamp = System.currentTimeMillis()

      val msg1 = MessageSent(seqNr = 1, "a", ack = true, confirmationQualifier = "q1", timestamp)
      queue ! StoreMessageSent(msg1, ackProbe.ref)
      journalOperations.expectMessage(InmemJournal.Write(msg1, pid.id, 1))

      val msg2 = MessageSent(seqNr = 2, "b", ack = true, confirmationQualifier = "q1", timestamp)
      queue ! StoreMessageSent(msg2, ackProbe.ref)
      journalOperations.expectMessage(InmemJournal.Write(msg2, pid.id, 2))

      val msg3 = MessageSent(seqNr = 3, "c", ack = true, "q2", timestamp)
      queue ! StoreMessageSent(msg3, ackProbe.ref)
      journalOperations.expectMessage(InmemJournal.Write(msg3, pid.id, 3))

      val msg4 = MessageSent(seqNr = 4, "d", ack = true, "q2", timestamp)
      queue ! StoreMessageSent(msg4, ackProbe.ref)
      journalOperations.expectMessage(InmemJournal.Write(msg4, pid.id, 4))

      val msg5 = MessageSent(seqNr = 5, "e", ack = true, "q2", timestamp)
      queue ! StoreMessageSent(msg5, ackProbe.ref)
      journalOperations.expectMessage(InmemJournal.Write(msg5, pid.id, 5))

      val timestamp2 = System.currentTimeMillis()
      queue ! StoreMessageConfirmed(seqNr = 4, "q2", timestamp2)
      journalOperations.expectMessage(InmemJournal.Write(Confirmed(seqNr = 4, "q2", timestamp2), pid.id, 6))

      queue ! LoadState(stateProbe.ref)
      // note that msg3 is also confirmed (removed) by the confirmation of msg4, same qualifier,
      // but msg1 and msg2 are still unconfirmed
      val expectedState =
        State(
          currentSeqNr = 6,
          highestConfirmedSeqNr = 4,
          confirmedSeqNr = Map("q2" -> (4L -> timestamp2)),
          unconfirmed = Vector(msg1, msg2, msg5))
      stateProbe.expectMessage(expectedState)

      // replay
      testKit.stop(queue)
      val queue2 = spawn(EventSourcedProducerQueue[String](pid))
      queue2 ! LoadState(stateProbe.ref)
      stateProbe.expectMessage(expectedState)
    }

    "cleanup old confirmationQualifier entries" in {
      val pid = nextPid()
      val ackProbe = createTestProbe[StoreMessageSentAck]()
      val settings = EventSourcedProducerQueue.Settings(system).withCleanupUnusedAfter(100.millis)
      val queue = spawn(EventSourcedProducerQueue[String](pid, settings))
      val now = System.currentTimeMillis()
      val timestamp0 = now - 70000

      val msg1 = MessageSent(seqNr = 1, "a", ack = true, confirmationQualifier = "q1", timestamp0)
      queue ! StoreMessageSent(msg1, ackProbe.ref)

      val msg2 = MessageSent(seqNr = 2, "b", ack = true, confirmationQualifier = "q1", timestamp0)
      queue ! StoreMessageSent(msg2, ackProbe.ref)

      val msg3 = MessageSent(seqNr = 3, "c", ack = true, "q2", timestamp0)
      queue ! StoreMessageSent(msg3, ackProbe.ref)

      val msg4 = MessageSent(seqNr = 4, "d", ack = true, "q2", timestamp0)
      queue ! StoreMessageSent(msg4, ackProbe.ref)

      val timestamp1 = now - 60000
      queue ! StoreMessageConfirmed(seqNr = 1, "q1", timestamp1)

      // cleanup tick
      Thread.sleep(1000)

      // q1, seqNr 2 is not confirmed yet, so q1 entries shouldn't be cleaned yet
      queue ! LoadState(stateProbe.ref)

      val expectedState1 =
        State(
          currentSeqNr = 5,
          highestConfirmedSeqNr = 1,
          confirmedSeqNr = Map("q1" -> (1L -> timestamp1)),
          unconfirmed = Vector(msg2, msg3, msg4))
      stateProbe.expectMessage(expectedState1)

      val timestamp2 = now - 50000
      queue ! StoreMessageConfirmed(seqNr = 2, "q1", timestamp2)

      val timestamp3 = now + 10000 // not old
      queue ! StoreMessageConfirmed(seqNr = 4, "q2", timestamp3)

      // cleanup tick
      Thread.sleep(1000)

      // all q1 confirmed and old timestamp, so q1 entries should be cleaned
      queue ! LoadState(stateProbe.ref)

      val expectedState2 =
        State[String](
          currentSeqNr = 5,
          highestConfirmedSeqNr = 4,
          confirmedSeqNr = Map("q2" -> (4L -> timestamp3)),
          unconfirmed = Vector.empty)
      stateProbe.expectMessage(expectedState2)
    }

  }

}
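For orientation, an editor's sketch (not part of the diff) of how the durable queue exercised above is typically plugged into a ProducerController; the object name, persistence id, actor name and the one-hour cleanup value are illustrative assumptions, while the Settings and factory calls are the ones used in the test.

// Minimal sketch, assuming names chosen for illustration only.
import akka.actor.typed.ActorSystem
import akka.actor.typed.delivery.ProducerController
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.delivery.EventSourcedProducerQueue

import scala.concurrent.duration._

object DurableProducerSketch {
  def init(system: ActorSystem[_]): Unit = {
    // withCleanupUnusedAfter controls when fully confirmed confirmationQualifier entries
    // are dropped, the behavior the "cleanup old confirmationQualifier entries" test drives
    // with 100.millis; one hour is just an illustrative production-like value.
    val queueSettings = EventSourcedProducerQueue.Settings(system).withCleanupUnusedAfter(1.hour)
    val durableQueue =
      EventSourcedProducerQueue[String](PersistenceId.ofUniqueId("sketch-producer"), queueSettings)
    // The queue behavior is handed to the ProducerController as its durable queue.
    system.systemActorOf(
      ProducerController[String]("sketch-producer", Some(durableQueue)),
      "sketchProducerController")
  }
}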
@ -0,0 +1,169 @@
/*
 * Copyright (C) 2017-2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.persistence.typed.delivery

import java.util.UUID

import akka.actor.testkit.typed.scaladsl._
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.delivery.ProducerController
import akka.persistence.typed.PersistenceId
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalatest.wordspec.AnyWordSpecLike

object ReliableDeliveryWithEventSourcedProducerQueueSpec {
  def conf: Config =
    ConfigFactory.parseString(s"""
    akka.persistence.journal.plugin = "akka.persistence.journal.inmem"
    akka.persistence.journal.inmem.test-serialization = on
    akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
    akka.persistence.snapshot-store.local.dir = "target/ProducerControllerWithEventSourcedProducerQueueSpec-${UUID
      .randomUUID()
      .toString}"
    akka.reliable-delivery.consumer-controller.flow-control-window = 20
    """)
}

class ReliableDeliveryWithEventSourcedProducerQueueSpec
    extends ScalaTestWithActorTestKit(ReliableDeliveryWithEventSourcedProducerQueueSpec.conf)
    with AnyWordSpecLike
    with LogCapturing {

"ReliableDelivery with EventSourcedProducerQueue" must {
|
||||
|
||||
"deliver messages after full producer and consumer restart" in {
|
||||
val producerId = "p1"
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[String]]()
|
||||
|
||||
val producerController = spawn(
|
||||
ProducerController[String](
|
||||
producerId,
|
||||
Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
val consumerController = spawn(ConsumerController[String]())
|
||||
val consumerProbe = createTestProbe[ConsumerController.Delivery[String]]()
|
||||
consumerController ! ConsumerController.Start(consumerProbe.ref)
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! "a"
|
||||
producerProbe.receiveMessage().sendNextTo ! "b"
|
||||
producerProbe.receiveMessage().sendNextTo ! "c"
|
||||
producerProbe.receiveMessage()
|
||||
|
||||
consumerProbe.receiveMessage().message should ===("a")
|
||||
|
||||
system.log.info("Stopping [{}]", producerController)
|
||||
testKit.stop(producerController)
|
||||
producerProbe.expectTerminated(producerController)
|
||||
testKit.stop(consumerController)
|
||||
consumerProbe.expectTerminated(consumerController)
|
||||
|
||||
val producerController2 = spawn(
|
||||
ProducerController[String](
|
||||
producerId,
|
||||
Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
|
||||
producerController2 ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
val consumerController2 = spawn(ConsumerController[String]())
|
||||
consumerController2 ! ConsumerController.Start(consumerProbe.ref)
|
||||
consumerController2 ! ConsumerController.RegisterToProducerController(producerController2)
|
||||
|
||||
val delivery1 = consumerProbe.receiveMessage()
|
||||
delivery1.message should ===("a")
|
||||
delivery1.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val delivery2 = consumerProbe.receiveMessage()
|
||||
delivery2.message should ===("b")
|
||||
delivery2.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val delivery3 = consumerProbe.receiveMessage()
|
||||
delivery3.message should ===("c")
|
||||
delivery3.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val requestNext4 = producerProbe.receiveMessage()
|
||||
requestNext4.currentSeqNr should ===(4)
|
||||
requestNext4.sendNextTo ! "d"
|
||||
|
||||
val delivery4 = consumerProbe.receiveMessage()
|
||||
delivery4.message should ===("d")
|
||||
delivery4.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
testKit.stop(producerController2)
|
||||
testKit.stop(consumerController2)
|
||||
}
|
||||
|
||||
"deliver messages after producer restart, keeping same ConsumerController" in {
|
||||
val producerId = "p2"
|
||||
val producerProbe = createTestProbe[ProducerController.RequestNext[String]]()
|
||||
|
||||
val producerController = spawn(
|
||||
ProducerController[String](
|
||||
producerId,
|
||||
Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
|
||||
producerController ! ProducerController.Start(producerProbe.ref)
|
||||
|
||||
val consumerController = spawn(ConsumerController[String]())
|
||||
val consumerProbe = createTestProbe[ConsumerController.Delivery[String]]()
|
||||
consumerController ! ConsumerController.Start(consumerProbe.ref)
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController)
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! "a"
|
||||
producerProbe.receiveMessage().sendNextTo ! "b"
|
||||
producerProbe.receiveMessage().sendNextTo ! "c"
|
||||
producerProbe.receiveMessage()
|
||||
|
||||
val delivery1 = consumerProbe.receiveMessage()
|
||||
delivery1.message should ===("a")
|
||||
|
||||
system.log.info("Stopping [{}]", producerController)
|
||||
testKit.stop(producerController)
|
||||
|
||||
consumerProbe.expectTerminated(producerController)
|
||||
|
||||
val producerController2 = spawn(
|
||||
ProducerController[String](
|
||||
producerId,
|
||||
Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
|
||||
producerController2 ! ProducerController.Start(producerProbe.ref)
|
||||
consumerController ! ConsumerController.RegisterToProducerController(producerController2)
|
||||
|
||||
delivery1.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val requestNext4 = producerProbe.receiveMessage()
|
||||
requestNext4.currentSeqNr should ===(4)
|
||||
requestNext4.sendNextTo ! "d"
|
||||
|
||||
// TODO Should we try harder to deduplicate first?
|
||||
val redelivery1 = consumerProbe.receiveMessage()
|
||||
redelivery1.message should ===("a")
|
||||
redelivery1.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
producerProbe.receiveMessage().sendNextTo ! "e"
|
||||
|
||||
val redelivery2 = consumerProbe.receiveMessage()
|
||||
redelivery2.message should ===("b")
|
||||
redelivery2.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val redelivery3 = consumerProbe.receiveMessage()
|
||||
redelivery3.message should ===("c")
|
||||
redelivery3.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val delivery4 = consumerProbe.receiveMessage()
|
||||
delivery4.message should ===("d")
|
||||
delivery4.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
val delivery5 = consumerProbe.receiveMessage()
|
||||
delivery5.message should ===("e")
|
||||
delivery5.confirmTo ! ConsumerController.Confirmed
|
||||
|
||||
testKit.stop(producerController2)
|
||||
testKit.stop(consumerController)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
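As a hedged illustration of what consumerProbe stands in for in these tests, a real consumer actor might look roughly like the sketch below; the object name and logging are assumptions, only the Start/Delivery/Confirmed protocol comes from the code above.

// Minimal consumer sketch; the behavior's message type is ConsumerController.Delivery[String],
// so context.self can be passed directly to ConsumerController.Start.
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.scaladsl.Behaviors

object SketchConsumer {
  def apply(consumerController: ActorRef[ConsumerController.Command[String]]): Behavior[ConsumerController.Delivery[String]] =
    Behaviors.setup { context =>
      consumerController ! ConsumerController.Start(context.self)
      Behaviors.receiveMessage { delivery =>
        context.log.info("Processing [{}]", delivery.message)
        // Confirming releases the next message from the ConsumerController.
        delivery.confirmTo ! ConsumerController.Confirmed
        Behaviors.same
      }
    }
}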
@ -0,0 +1,263 @@
/*
 * Copyright (C) 2017-2020 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.persistence.typed.delivery

import java.util.UUID
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.testkit.typed.FishingOutcome
import akka.actor.testkit.typed.scaladsl._
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.delivery.WorkPullingProducerController
import akka.actor.typed.receptionist.ServiceKey
import akka.persistence.typed.PersistenceId
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalatest.wordspec.AnyWordSpecLike

object WorkPullingWithEventSourcedProducerQueueSpec {
  def conf: Config =
    ConfigFactory.parseString(s"""
    akka.persistence.journal.plugin = "akka.persistence.journal.inmem"
    akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
    akka.persistence.snapshot-store.local.dir = "target/WorkPullingWithEventSourcedProducerQueueSpec-${UUID
      .randomUUID()
      .toString}"
    akka.reliable-delivery.consumer-controller.flow-control-window = 20
    """)
}

class WorkPullingWithEventSourcedProducerQueueSpec
    extends ScalaTestWithActorTestKit(WorkPullingWithEventSourcedProducerQueueSpec.conf)
    with AnyWordSpecLike
    with LogCapturing {

  private val idCounter = new AtomicInteger(0)
  private def nextId(): String = s"${idCounter.incrementAndGet()}"

  private def workerServiceKey(): ServiceKey[ConsumerController.Command[String]] =
    ServiceKey(s"worker-${idCounter.get}")

  "WorkPulling with EventSourcedProducerQueue" must {

    "deliver messages after full producer and consumer restart" in {
      val producerId = s"p${nextId()}"
      val serviceKey = workerServiceKey()
      val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[String]]()

      val producerController = spawn(
        WorkPullingProducerController[String](
          producerId,
          serviceKey,
          Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
      producerController ! WorkPullingProducerController.Start(producerProbe.ref)

      val consumerController = spawn(ConsumerController[String](serviceKey))
      val consumerProbe = createTestProbe[ConsumerController.Delivery[String]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! "a"
      producerProbe.receiveMessage().sendNextTo ! "b"
      producerProbe.receiveMessage().sendNextTo ! "c"
      producerProbe.receiveMessage()

      consumerProbe.receiveMessage().message should ===("a")

      system.log.info("Stopping [{}]", producerController)
      testKit.stop(producerController)
      producerProbe.expectTerminated(producerController)
      testKit.stop(consumerController)
      consumerProbe.expectTerminated(consumerController)

      val producerController2 = spawn(
        WorkPullingProducerController[String](
          producerId,
          serviceKey,
          Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
      producerController2 ! WorkPullingProducerController.Start(producerProbe.ref)

      val consumerController2 = spawn(ConsumerController[String](serviceKey))
      consumerController2 ! ConsumerController.Start(consumerProbe.ref)

      val delivery1 = consumerProbe.receiveMessage()
      delivery1.message should ===("a")
      delivery1.confirmTo ! ConsumerController.Confirmed

      val delivery2 = consumerProbe.receiveMessage()
      delivery2.message should ===("b")
      delivery2.confirmTo ! ConsumerController.Confirmed

      val delivery3 = consumerProbe.receiveMessage()
      delivery3.message should ===("c")
      delivery3.confirmTo ! ConsumerController.Confirmed

      val requestNext4 = producerProbe.receiveMessage()
      requestNext4.sendNextTo ! "d"

      val delivery4 = consumerProbe.receiveMessage()
      delivery4.message should ===("d")
      delivery4.confirmTo ! ConsumerController.Confirmed

      testKit.stop(producerController2)
      testKit.stop(consumerController2)
    }

    "deliver messages after producer restart, keeping same ConsumerController" in {
      val producerId = s"p${nextId()}"
      val serviceKey = workerServiceKey()
      val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[String]]()

      val producerController = spawn(
        WorkPullingProducerController[String](
          producerId,
          serviceKey,
          Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
      producerController ! WorkPullingProducerController.Start(producerProbe.ref)

      val consumerController = spawn(ConsumerController[String](serviceKey))
      val consumerProbe = createTestProbe[ConsumerController.Delivery[String]]()
      consumerController ! ConsumerController.Start(consumerProbe.ref)

      producerProbe.receiveMessage().sendNextTo ! "a"
      producerProbe.receiveMessage().sendNextTo ! "b"
      producerProbe.receiveMessage().sendNextTo ! "c"
      producerProbe.receiveMessage()

      val delivery1 = consumerProbe.receiveMessage()
      delivery1.message should ===("a")

      system.log.info("Stopping [{}]", producerController)
      testKit.stop(producerController)

      val producerController2 = spawn(
        WorkPullingProducerController[String](
          producerId,
          serviceKey,
          Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
      producerController2 ! WorkPullingProducerController.Start(producerProbe.ref)

      // Delivery in flight from old dead WorkPullingProducerController, confirmation will not be stored
      delivery1.confirmTo ! ConsumerController.Confirmed

      // from old, buffered in ConsumerController
      val delivery2 = consumerProbe.receiveMessage()
      delivery2.message should ===("b")
      delivery2.confirmTo ! ConsumerController.Confirmed

      // from old, buffered in ConsumerController
      val delivery3 = consumerProbe.receiveMessage()
      delivery3.message should ===("c")
      delivery3.confirmTo ! ConsumerController.Confirmed

      val requestNext4 = producerProbe.receiveMessage()
      requestNext4.sendNextTo ! "d"

      // TODO Should we try harder to deduplicate first?
      val redelivery1 = consumerProbe.receiveMessage()
      redelivery1.message should ===("a")
      redelivery1.confirmTo ! ConsumerController.Confirmed

      producerProbe.receiveMessage().sendNextTo ! "e"

      val redelivery2 = consumerProbe.receiveMessage()
      redelivery2.message should ===("b")
      redelivery2.confirmTo ! ConsumerController.Confirmed

      val redelivery3 = consumerProbe.receiveMessage()
      redelivery3.message should ===("c")
      redelivery3.confirmTo ! ConsumerController.Confirmed

      val delivery4 = consumerProbe.receiveMessage()
      delivery4.message should ===("d")
      delivery4.confirmTo ! ConsumerController.Confirmed

      val delivery5 = consumerProbe.receiveMessage()
      delivery5.message should ===("e")
      delivery5.confirmTo ! ConsumerController.Confirmed

      testKit.stop(producerController2)
      testKit.stop(consumerController)
    }

    "deliver messages after restart, when using several workers" in {
      val producerId = s"p${nextId()}"
      val serviceKey = workerServiceKey()
      val producerProbe = createTestProbe[WorkPullingProducerController.RequestNext[String]]()

      val producerController = spawn(
        WorkPullingProducerController[String](
          producerId,
          serviceKey,
          Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
      producerController ! WorkPullingProducerController.Start(producerProbe.ref)

      // same consumerProbe for all workers, since we can't know the routing
      val consumerProbe = createTestProbe[ConsumerController.Delivery[String]]()
      var received = Vector.empty[ConsumerController.Delivery[String]]

      val consumerController1 = spawn(ConsumerController[String](serviceKey))
      consumerController1 ! ConsumerController.Start(consumerProbe.ref)
      val consumerController2 = spawn(ConsumerController[String](serviceKey))
      consumerController2 ! ConsumerController.Start(consumerProbe.ref)
      val consumerController3 = spawn(ConsumerController[String](serviceKey))
      consumerController3 ! ConsumerController.Start(consumerProbe.ref)

      val batch1 = 15
      val confirmed1 = 10
      (1 to batch1).foreach { n =>
        producerProbe.receiveMessage().sendNextTo ! s"msg-$n"
      }

      (1 to confirmed1).foreach { _ =>
        received :+= consumerProbe.receiveMessage()
        received.last.confirmTo ! ConsumerController.Confirmed
      }

      system.log.debug("Workers received [{}]", received.mkString(", "))
      received.map(_.message).toSet.size should ===(confirmed1)

      producerProbe.receiveMessage()

      system.log.info("Stopping [{}]", producerController)
      testKit.stop(producerController)
      system.log.info("Stopping [{}]", consumerController2)
      testKit.stop(consumerController2)

      val consumerController4 = spawn(ConsumerController[String](serviceKey))
      consumerController4 ! ConsumerController.Start(consumerProbe.ref)

      val producerController2 = spawn(
        WorkPullingProducerController[String](
          producerId,
          serviceKey,
          Some(EventSourcedProducerQueue[String](PersistenceId.ofUniqueId(producerId)))))
      producerController2 ! WorkPullingProducerController.Start(producerProbe.ref)

      val batch2 = 5
      (batch1 + 1 to batch1 + batch2).foreach { n =>
        producerProbe.receiveMessage().sendNextTo ! s"msg-$n"
      }

      consumerProbe.fishForMessage(consumerProbe.remainingOrDefault) { delivery =>
        received :+= delivery
        delivery.confirmTo ! ConsumerController.Confirmed
        if (received.map(_.message).toSet.size == batch1 + batch2)
          FishingOutcome.Complete
        else
          FishingOutcome.Continue
      }

      system.log.debug("Workers received [{}]", received.mkString(", "))
      received.map(_.message).toSet should ===((1 to batch1 + batch2).map(n => s"msg-$n").toSet)

      testKit.stop(producerController2)
      testKit.stop(consumerController1)
      testKit.stop(consumerController3)
      testKit.stop(consumerController4)
    }

  }

}
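A possible worker-side counterpart to the work-pulling tests above, as an illustrative sketch: each ConsumerController spawned with the ServiceKey registers itself with the receptionist so the WorkPullingProducerController can discover it and hand it work. The object, key and actor names below are made up; only the API calls come from the tests.

// Hedged sketch; names are assumptions chosen for illustration.
import akka.actor.typed.{ ActorRef, ActorSystem }
import akka.actor.typed.delivery.ConsumerController
import akka.actor.typed.receptionist.ServiceKey

object WorkerSketch {
  // The ServiceKey ties workers to the WorkPullingProducerController, as in workerServiceKey() above.
  val workerKey: ServiceKey[ConsumerController.Command[String]] =
    ServiceKey[ConsumerController.Command[String]]("sketch-worker")

  def spawnWorker(system: ActorSystem[_], handler: ActorRef[ConsumerController.Delivery[String]]): Unit = {
    // ConsumerController[String](serviceKey) registers itself under the key,
    // so the producer side discovers the worker and starts handing out messages.
    val controller = system.systemActorOf(ConsumerController[String](workerKey), "sketchWorkerController")
    controller ! ConsumerController.Start(handler)
  }
}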
@ -395,7 +395,8 @@ lazy val persistenceTyped = akkaModule("akka-persistence-typed")
    actorTyped,
    persistence % "compile->compile;test->test",
    persistenceQuery % "test",
    actorTypedTests % "test->test",
    actorTestkitTyped % "test->test",
    clusterTyped % "test->test",
    actorTestkitTyped % "test->test",
    jackson % "test->test")
  .settings(javacOptions += "-parameters") // for Jackson
@ -409,12 +410,13 @@ lazy val clusterTyped = akkaModule("akka-cluster-typed")
    cluster % "compile->compile;test->test;multi-jvm->multi-jvm",
    clusterTools,
    distributedData,
    persistence % "test->test",
    persistenceTyped % "test->test",
    actorTestkitTyped % "test->test",
    actorTypedTests % "test->test",
    remoteTests % "test->test",
    jackson % "test->test")
  .settings(Protobuf.settings)
  // To be able to import ContainerFormats.proto
  .settings(Protobuf.importPath := Some(baseDirectory.value / ".." / "akka-remote" / "src" / "main" / "protobuf"))
  .settings(AutomaticModuleName.settings("akka.cluster.typed"))
  .settings(Protobuf.settings)
  // To be able to import ContainerFormats.proto
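For completeness, a downstream project would pull in the modules these build changes wire together roughly as sketched below; this is a hypothetical user-side build.sbt fragment, not part of the Akka build, and the version value is a placeholder.

// Hypothetical user-side build.sbt fragment; AkkaVersion is a placeholder, use a real release.
val AkkaVersion = "2.6.x"
libraryDependencies ++= Seq(
  "com.typesafe.akka" %% "akka-actor-typed" % AkkaVersion,
  "com.typesafe.akka" %% "akka-persistence-typed" % AkkaVersion,
  "com.typesafe.akka" %% "akka-cluster-typed" % AkkaVersion)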