Update scalariform (#23778) (#23783)

This commit is contained in:
Arnout Engelen 2017-10-06 10:30:28 +02:00 committed by GitHub
parent 63ccdeec16
commit b1df13d4d4
221 changed files with 1528 additions and 1580 deletions

View file

@ -20,12 +20,12 @@ class InetAddressDnsResolver(cache: SimpleDnsCache, config: Config) extends Acto
// Controls the cache policy for successful lookups only
private final val CachePolicyProp = "networkaddress.cache.ttl"
// Deprecated JVM property key, keeping for legacy compatibility; replaced by CachePolicyProp
// Deprecated JVM property key, keeping for legacy compatibility; replaced by CachePolicyProp
private final val CachePolicyPropFallback = "sun.net.inetaddr.ttl"
// Controls the cache policy for negative lookups only
private final val NegativeCachePolicyProp = "networkaddress.cache.negative.ttl"
// Deprecated JVM property key, keeping for legacy compatibility; replaced by NegativeCachePolicyProp
// Deprecated JVM property key, keeping for legacy compatibility; replaced by NegativeCachePolicyProp
private final val NegativeCachePolicyPropFallback = "sun.net.inetaddr.negative.ttl"
// default values (-1 and 0 are magic numbers, trust them)

View file

@ -124,4 +124,4 @@ private[io] object TcpOutgoingConnection {
def connectTimeoutExpired(timeout: Option[FiniteDuration]) =
new ConnectException(s"Connect timeout of $timeout expired") with NoStackTrace
}
}

View file

@ -32,7 +32,7 @@ case object OptimalSizeExploringResizer {
/**
* INTERNAL API
*/
private[routing]type PoolSize = Int
private[routing] type PoolSize = Int
/**
* INTERNAL API
@ -51,7 +51,7 @@ case object OptimalSizeExploringResizer {
/**
* INTERNAL API
*/
private[routing]type PerformanceLog = Map[PoolSize, Duration]
private[routing] type PerformanceLog = Map[PoolSize, Duration]
def apply(resizerCfg: Config): OptimalSizeExploringResizer =
DefaultOptimalSizeExploringResizer(

View file

@ -149,8 +149,8 @@ private[akka] class RouterActor extends Actor {
val routingLogicController: Option[ActorRef] = cell.routerConfig.routingLogicController(
cell.router.logic).map(props ⇒ context.actorOf(
props.withDispatcher(context.props.dispatcher),
name = "routingLogicController"))
props.withDispatcher(context.props.dispatcher),
name = "routingLogicController"))
def receive = {
case GetRoutees ⇒

View file

@ -667,7 +667,7 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz
// *must* be overridden by derived classes. This construction is necessary
// to specialize the return type, as the method is already implemented in
// a parent trait.
//
//
// Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead
override def iterator: ByteIterator = throw new UnsupportedOperationException("Method iterator is not implemented in ByteString")
@ -742,7 +742,7 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz
* @param buffer a ByteBuffer to copy bytes to
* @return the number of bytes actually copied
*/
// *must* be overridden by derived classes.
// *must* be overridden by derived classes.
def copyToBuffer(buffer: ByteBuffer): Int = throw new UnsupportedOperationException("Method copyToBuffer is not implemented in ByteString")
/**

View file

@ -9,10 +9,10 @@ object BenchRunner {
import scala.collection.JavaConverters._
val args2 = args.toList.flatMap {
case "quick" => "-i 1 -wi 1 -f1 -t1".split(" ").toList
case "full" => "-i 10 -wi 4 -f3 -t1".split(" ").toList
case "jitwatch" => "-jvmArgs=-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation" :: Nil
case other => other :: Nil
case "quick" ⇒ "-i 1 -wi 1 -f1 -t1".split(" ").toList
case "full" ⇒ "-i 10 -wi 4 -f3 -t1".split(" ").toList
case "jitwatch" ⇒ "-jvmArgs=-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation" :: Nil
case other ⇒ other :: Nil
}
val opts = new CommandLineOptions(args2: _*)
@ -20,7 +20,7 @@ object BenchRunner {
val report = results.asScala.map { result: RunResult ⇒
val bench = result.getParams.getBenchmark
val params = result.getParams.getParamsKeys.asScala.map(key => s"$key=${result.getParams.getParam(key)}").mkString("_")
val params = result.getParams.getParamsKeys.asScala.map(key ⇒ s"$key=${result.getParams.getParam(key)}").mkString("_")
val score = result.getAggregatedResult.getPrimaryResult.getScore.round
val unit = result.getAggregatedResult.getPrimaryResult.getScoreUnit
s"\t${bench}_${params}\t$score\t$unit"

View file

@ -11,7 +11,7 @@ import org.openjdk.jmh.annotations.Fork
import org.openjdk.jmh.annotations.Measurement
import org.openjdk.jmh.annotations.Mode
import org.openjdk.jmh.annotations.OutputTimeUnit
import org.openjdk.jmh.annotations.{ Scope => JmhScope }
import org.openjdk.jmh.annotations.{ Scope ⇒ JmhScope }
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.annotations.Warmup

View file

@ -39,8 +39,8 @@ class AffinityPoolComparativeBenchmark {
requireRightNumberOfCores(cores)
val mailboxConf = mailbox match {
case "default" => ""
case "SingleConsumerOnlyUnboundedMailbox" =>
case "default" ⇒ ""
case "SingleConsumerOnlyUnboundedMailbox" ⇒
s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}""""
}

View file

@ -43,8 +43,8 @@ class AffinityPoolRequestResponseBenchmark {
requireRightNumberOfCores(cores)
val mailboxConf = mailbox match {
case "default" => ""
case "SingleConsumerOnlyUnboundedMailbox" =>
case "default" ⇒ ""
case "SingleConsumerOnlyUnboundedMailbox" ⇒
s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}""""
}

View file

@ -19,7 +19,7 @@ object BenchmarkActors {
class PingPong(val messagesPerPair: Int, latch: CountDownLatch) extends Actor {
var left = messagesPerPair / 2
def receive = {
case Message =>
case Message ⇒
if (left == 0) {
latch.countDown()
@ -37,7 +37,7 @@ object BenchmarkActors {
class Echo extends Actor {
def receive = {
case Message =>
case Message ⇒
sender() ! Message
}
}
@ -54,7 +54,7 @@ object BenchmarkActors {
private var batch = 0
def receive = {
case Message =>
case Message ⇒
batch -= 1
if (batch <= 0) {
if (!sendBatch()) {
@ -81,9 +81,9 @@ object BenchmarkActors {
class Pipe(next: Option[ActorRef]) extends Actor {
def receive = {
case Message =>
case Message ⇒
if (next.isDefined) next.get forward Message
case Stop =>
case Stop ⇒
context stop self
if (next.isDefined) next.get forward Stop
}
@ -97,7 +97,7 @@ object BenchmarkActors {
val fullPathToDispatcher = "akka.actor." + dispatcher
val latch = new CountDownLatch(numPairs * 2)
val actors = for {
i <- (1 to numPairs).toVector
i ← (1 to numPairs).toVector
} yield {
val ping = system.actorOf(PingPong.props(messagesPerPair, latch).withDispatcher(fullPathToDispatcher))
val pong = system.actorOf(PingPong.props(messagesPerPair, latch).withDispatcher(fullPathToDispatcher))
@ -108,19 +108,19 @@ object BenchmarkActors {
private def initiatePingPongForPairs(refs: Vector[(ActorRef, ActorRef)], inFlight: Int) = {
for {
(ping, pong) <- refs
_ <- 1 to inFlight
(ping, pong) ← refs
_ ← 1 to inFlight
} {
ping.tell(Message, pong)
}
}
private def startEchoActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String,
batchSize: Int)(implicit system: ActorSystem) = {
batchSize: Int)(implicit system: ActorSystem) = {
val fullPathToDispatcher = "akka.actor." + dispatcher
val latch = new CountDownLatch(numPairs)
val actors = (1 to numPairs).map { _ =>
val actors = (1 to numPairs).map { _ ⇒
system.actorOf(EchoSender.props(messagesPerPair, latch, batchSize).withDispatcher(fullPathToDispatcher))
}.toVector
(actors, latch)

View file

@ -38,8 +38,8 @@ class ForkJoinActorBenchmark {
requireRightNumberOfCores(cores)
val mailboxConf = mailbox match {
case "default" => ""
case "SingleConsumerOnlyUnboundedMailbox" =>
case "default" ⇒ ""
case "SingleConsumerOnlyUnboundedMailbox" ⇒
s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}""""
}
@ -133,4 +133,4 @@ object ForkJoinActorBenchmark {
final val totalMessagesLessThanCores = (lessThanCoresActors * messages) / 2
final val totalMessagesSameAsCores = (sameAsCoresActors * messages) / 2
}
}

View file

@ -20,7 +20,7 @@ object RequestResponseActors {
private val randGenerator = new Random()
override def receive: Receive = {
case u: User => {
case u: User ⇒ {
receivedUsers.put(u.userId, u)
if (left == 0) {
latch.countDown()
@ -42,10 +42,10 @@ object RequestResponseActors {
class UserServiceActor(userDb: Map[Int, User], latch: CountDownLatch, numQueries: Int) extends Actor {
private var left = numQueries
def receive = {
case Request(id) =>
case Request(id) ⇒
userDb.get(id) match {
case Some(u) => sender() ! u
case None =>
case Some(u) ⇒ sender() ! u
case None ⇒
}
if (left == 0) {
latch.countDown()
@ -60,11 +60,11 @@ object RequestResponseActors {
def props(latch: CountDownLatch, numQueries: Int, numUsersInDB: Int) = {
val r = new Random()
val users = for {
id <- 0 until numUsersInDB
id ← 0 until numUsersInDB
firstName = r.nextString(5)
lastName = r.nextString(7)
ssn = r.nextInt()
friendIds = for { _ <- 0 until 5 } yield r.nextInt(numUsersInDB)
friendIds = for { _ ← 0 until 5 } yield r.nextInt(numUsersInDB)
} yield id -> User(id, firstName, lastName, ssn, friendIds)
Props(new UserServiceActor(users.toMap, latch, numQueries))
}
@ -74,7 +74,7 @@ object RequestResponseActors {
val fullPathToDispatcher = "akka.actor." + dispatcher
val latch = new CountDownLatch(numActors)
val actorsPairs = for {
i <- (1 to (numActors / 2)).toVector
i ← (1 to (numActors / 2)).toVector
userQueryActor = system.actorOf(UserQueryActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher))
userServiceActor = system.actorOf(UserServiceActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher))
} yield (userQueryActor, userServiceActor)
@ -83,8 +83,8 @@ object RequestResponseActors {
def initiateQuerySimulation(requestResponseActorPairs: Seq[(ActorRef, ActorRef)], inFlight: Int) = {
for {
(queryActor, serviceActor) <- requestResponseActorPairs
i <- 1 to inFlight
(queryActor, serviceActor) ← requestResponseActorPairs
i ← 1 to inFlight
} {
serviceActor.tell(Request(i), queryActor)
}

View file

@ -13,7 +13,7 @@ import java.util.concurrent.TimeUnit
object StashCreationBenchmark {
class StashingActor extends Actor with Stash {
def receive = {
case msg => sender() ! msg
case msg ⇒ sender() ! msg
}
}

View file

@ -121,27 +121,27 @@ object TellOnlyBenchmark {
}
class DroppingDispatcher(
_configurator: MessageDispatcherConfigurator,
_id: String,
_throughput: Int,
_throughputDeadlineTime: Duration,
_configurator: MessageDispatcherConfigurator,
_id: String,
_throughput: Int,
_throughputDeadlineTime: Duration,
_executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
_shutdownTimeout: FiniteDuration
_shutdownTimeout: FiniteDuration
)
extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
override protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = {
val mbox = receiver.mailbox
mbox.enqueue(receiver.self, invocation)
mbox.messageQueue match {
case mb: DroppingMessageQueue if mb.dropping ⇒ // do nothing
case _ ⇒ registerForExecution(mbox, true, false)
case _ ⇒ registerForExecution(mbox, true, false)
}
}
}
class DroppingDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites)
extends MessageDispatcherConfigurator(config, prerequisites) {
extends MessageDispatcherConfigurator(config, prerequisites) {
override def dispatcher(): MessageDispatcher = new DroppingDispatcher(
this,

View file

@ -10,7 +10,7 @@ import org.openjdk.jmh.annotations.Fork
import org.openjdk.jmh.annotations.Measurement
import org.openjdk.jmh.annotations.Mode
import org.openjdk.jmh.annotations.OutputTimeUnit
import org.openjdk.jmh.annotations.{ Scope => JmhScope }
import org.openjdk.jmh.annotations.{ Scope ⇒ JmhScope }
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.annotations.Warmup
import akka.cluster.UniqueAddress
@ -49,7 +49,7 @@ class ORSetMergeBenchmark {
@Setup(Level.Trial)
def setup(): Unit = {
set1 = (1 to set1Size).foldLeft(ORSet.empty[String])((s, n) => s.add(nextNode(), "elem" + n))
set1 = (1 to set1Size).foldLeft(ORSet.empty[String])((s, n) ⇒ s.add(nextNode(), "elem" + n))
addFromSameNode = set1.add(nodeA, "elem" + set1Size + 1).merge(set1)
addFromOtherNode = set1.add(nodeB, "elem" + set1Size + 1).merge(set1)
complex1 = set1.add(nodeB, "a").add(nodeC, "b").remove(nodeD, "elem" + set1Size).merge(set1)

View file

@ -10,7 +10,7 @@ import org.openjdk.jmh.annotations.Fork
import org.openjdk.jmh.annotations.Measurement
import org.openjdk.jmh.annotations.Mode
import org.openjdk.jmh.annotations.OutputTimeUnit
import org.openjdk.jmh.annotations.{ Scope => JmhScope }
import org.openjdk.jmh.annotations.{ Scope ⇒ JmhScope }
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.annotations.Warmup
import akka.cluster.UniqueAddress
@ -46,7 +46,7 @@ class VersionVectorBenchmark {
@Setup(Level.Trial)
def setup(): Unit = {
vv1 = (1 to size).foldLeft(VersionVector.empty)((vv, n) => vv + nextNode())
vv1 = (1 to size).foldLeft(VersionVector.empty)((vv, n) ⇒ vv + nextNode())
vv2 = vv1 + nextNode()
vv3 = vv1 + nextNode()
dot1 = VersionVector(nodeA, vv1.versionAt(nodeA))

View file

@ -41,8 +41,8 @@ mailbox {
implicit val sys = ActorSystem("ANQ", config)
val ref = sys.actorOf(Props(new Actor {
def receive = {
case Stop => sender() ! Stop
case _ =>
case Stop ⇒ sender() ! Stop
case _ ⇒
}
}).withDispatcher("dispatcher").withMailbox("mailbox"), "receiver")

View file

@ -8,7 +8,7 @@ import akka.actor.Actor
/** only as a "the best we could possibly get" baseline, does not persist anything */
class BaselineActor(respondAfter: Int) extends Actor {
override def receive = {
case n: Int => if (n == respondAfter) sender() ! n
case n: Int ⇒ if (n == respondAfter) sender() ! n
}
}

View file

@ -65,7 +65,7 @@ class PersistentActorDeferBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def tell_persistAsync_defer_persistAsync_reply(): Unit = {
for (i <- data10k) persistAsync_defer.tell(i, probe.ref)
for (i ← data10k) persistAsync_defer.tell(i, probe.ref)
probe.expectMsg(data10k.last)
}
@ -73,7 +73,7 @@ class PersistentActorDeferBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def tell_persistAsync_defer_persistAsync_replyASAP(): Unit = {
for (i <- data10k) persistAsync_defer_replyASAP.tell(i, probe.ref)
for (i data10k) persistAsync_defer_replyASAP.tell(i, probe.ref)
probe.expectMsg(data10k.last)
}
@ -85,12 +85,12 @@ class `persistAsync, defer`(respondAfter: Int) extends PersistentActor {
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
persistAsync(Evt(n)) { e => }
deferAsync(Evt(n)) { e => if (e.i == respondAfter) sender() ! e.i }
case n: Int ⇒
persistAsync(Evt(n)) { e ⇒ }
deferAsync(Evt(n)) { e ⇒ if (e.i == respondAfter) sender() ! e.i }
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
}
class `persistAsync, defer, respond ASAP`(respondAfter: Int) extends PersistentActor {
@ -98,12 +98,12 @@ class `persistAsync, defer, respond ASAP`(respondAfter: Int) extends PersistentA
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
persistAsync(Evt(n)) { e => }
deferAsync(Evt(n)) { e => }
case n: Int ⇒
persistAsync(Evt(n)) { e ⇒ }
deferAsync(Evt(n)) { e ⇒ }
if (n == respondAfter) sender() ! n
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
}

View file

@ -63,7 +63,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def actor_normalActor_reply_baseline(): Unit = {
for (i <- data10k) actor.tell(i, probe.ref)
for (i ← data10k) actor.tell(i, probe.ref)
probe.expectMsg(data10k.last)
}
@ -71,7 +71,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_persist_reply(): Unit = {
for (i <- data10k) persistPersistentActor.tell(i, probe.ref)
for (i ← data10k) persistPersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
}
@ -79,7 +79,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_persistAsync_reply(): Unit = {
for (i <- data10k) persistAsync1PersistentActor.tell(i, probe.ref)
for (i ← data10k) persistAsync1PersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
}
@ -87,7 +87,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_noPersist_reply(): Unit = {
for (i <- data10k) noPersistPersistentActor.tell(i, probe.ref)
for (i ← data10k) noPersistPersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
}
@ -95,7 +95,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_persistAsync_replyRightOnCommandReceive(): Unit = {
for (i <- data10k) persistAsyncQuickReplyPersistentActor.tell(i, probe.ref)
for (i ← data10k) persistAsyncQuickReplyPersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
}
@ -107,10 +107,10 @@ class NoPersistPersistentActor(respondAfter: Int) extends PersistentActor {
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int => if (n == respondAfter) sender() ! Evt(n)
case n: Int ⇒ if (n == respondAfter) sender() ! Evt(n)
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
}
@ -119,10 +119,10 @@ class PersistPersistentActor(respondAfter: Int) extends PersistentActor {
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int => persist(Evt(n)) { e => if (e.i == respondAfter) sender() ! e }
case n: Int ⇒ persist(Evt(n)) { e ⇒ if (e.i == respondAfter) sender() ! e }
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
}
@ -131,11 +131,11 @@ class PersistAsyncPersistentActor(respondAfter: Int) extends PersistentActor {
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
persistAsync(Evt(n)) { e => if (e.i == respondAfter) sender() ! e }
case n: Int ⇒
persistAsync(Evt(n)) { e ⇒ if (e.i == respondAfter) sender() ! e }
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
}
@ -144,12 +144,12 @@ class PersistAsyncQuickReplyPersistentActor(respondAfter: Int) extends Persisten
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
case n: Int ⇒
val e = Evt(n)
if (n == respondAfter) sender() ! e
persistAsync(e)(identity)
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
}

View file

@ -62,7 +62,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_persistAsync_with_AtLeastOnceDelivery(): Unit = {
for (i <- 1 to dataCount)
for (i ← 1 to dataCount)
persistAsyncPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
probe.expectMsg(20.seconds, Evt(dataCount))
}
@ -70,7 +70,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_persist_with_AtLeastOnceDelivery(): Unit = {
for (i <- 1 to dataCount)
for (i ← 1 to dataCount)
persistPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
probe.expectMsg(2.minutes, Evt(dataCount))
}
@ -78,7 +78,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
def persistentActor_noPersist_with_AtLeastOnceDelivery(): Unit = {
for (i <- 1 to dataCount)
for (i ← 1 to dataCount)
noPersistPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
probe.expectMsg(20.seconds, Evt(dataCount))
}
@ -91,28 +91,28 @@ class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upS
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
deliver(downStream)(deliveryId => Msg(deliveryId, n))
case n: Int ⇒
deliver(downStream)(deliveryId ⇒ Msg(deliveryId, n))
if (n == respondAfter)
//switch to wait all message confirmed
context.become(waitConfirm)
case Confirm(deliveryId) =>
case Confirm(deliveryId) ⇒
confirmDelivery(deliveryId)
case _ => // do nothing
case _ ⇒ // do nothing
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
val waitConfirm: Actor.Receive = {
case Confirm(deliveryId) =>
case Confirm(deliveryId) ⇒
confirmDelivery(deliveryId)
if (numberOfUnconfirmed == 0) {
upStream ! Evt(respondAfter)
context.unbecome()
}
case _ => // do nothing
case _ ⇒ // do nothing
}
}
@ -123,30 +123,30 @@ class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upStr
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
persist(MsgSent(n)) { e =>
deliver(downStream)(deliveryId => Msg(deliveryId, n))
case n: Int ⇒
persist(MsgSent(n)) { e ⇒
deliver(downStream)(deliveryId ⇒ Msg(deliveryId, n))
if (n == respondAfter)
//switch to wait all message confirmed
context.become(waitConfirm)
}
case Confirm(deliveryId) =>
case Confirm(deliveryId) ⇒
confirmDelivery(deliveryId)
case _ => // do nothing
case _ ⇒ // do nothing
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
val waitConfirm: Actor.Receive = {
case Confirm(deliveryId) =>
case Confirm(deliveryId) ⇒
confirmDelivery(deliveryId)
if (numberOfUnconfirmed == 0) {
upStream ! Evt(respondAfter)
context.unbecome()
}
case _ => // do nothing
case _ ⇒ // do nothing
}
}
@ -157,30 +157,30 @@ class PersistAsyncPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val
override def persistenceId: String = self.path.name
override def receiveCommand = {
case n: Int =>
persistAsync(MsgSent(n)) { e =>
deliver(downStream)(deliveryId => Msg(deliveryId, n))
case n: Int ⇒
persistAsync(MsgSent(n)) { e ⇒
deliver(downStream)(deliveryId ⇒ Msg(deliveryId, n))
if (n == respondAfter)
//switch to wait all message confirmed
context.become(waitConfirm)
}
case Confirm(deliveryId) =>
case Confirm(deliveryId) ⇒
confirmDelivery(deliveryId)
case _ => // do nothing
case _ ⇒ // do nothing
}
override def receiveRecover = {
case _ => // do nothing
case _ ⇒ // do nothing
}
val waitConfirm: Actor.Receive = {
case Confirm(deliveryId) =>
case Confirm(deliveryId) ⇒
confirmDelivery(deliveryId)
if (numberOfUnconfirmed == 0) {
upStream ! Evt(respondAfter)
context.unbecome()
}
case _ => // do nothing
case _ ⇒ // do nothing
}
}
@ -198,15 +198,15 @@ class DestinationActor extends Actor {
var seqNr = 0L
override def receive = {
case n: Int =>
case n: Int ⇒
sender() ! Confirm(n)
case Msg(deliveryId, _) =>
case Msg(deliveryId, _) ⇒
seqNr += 1
if (seqNr % 11 == 0) {
//drop it
} else {
sender() ! Confirm(deliveryId)
}
case _ => // do nothing
case _ ⇒ // do nothing
}
}

View file

@ -18,7 +18,7 @@ import akka.stream.stage.OutHandler
class BenchTestSource(elementCount: Int) extends GraphStage[SourceShape[java.lang.Integer]] {
private val elements = new Array[java.lang.Integer](elementCount)
(1 to elementCount).map(n => elements(n - 1) = n)
(1 to elementCount).map(n ⇒ elements(n - 1) = n)
val out: Outlet[java.lang.Integer] = Outlet("BenchTestSource")
override val shape: SourceShape[java.lang.Integer] = SourceShape(out)

View file

@ -91,11 +91,11 @@ class CodecBenchmark {
"""
)
val config = configType match {
case RemoteInstrument =>
case RemoteInstrument ⇒
ConfigFactory.parseString(
s"""akka.remote.artery.advanced.instruments = [ "${classOf[DummyRemoteInstrument].getName}" ]"""
).withFallback(commonConfig)
case _ =>
case _ ⇒
commonConfig
}
@ -148,7 +148,7 @@ class CodecBenchmark {
val deserializer: Flow[InboundEnvelope, InboundEnvelope, NotUsed] =
Flow.fromGraph(new Deserializer(inboundContext, system.asInstanceOf[ExtendedActorSystem], envelopePool))
val decoderInput: Flow[String, EnvelopeBuffer, NotUsed] = Flow[String]
.map { _ =>
.map { _ ⇒
val envelope = envelopePool.acquire()
envelopeTemplateBuffer.rewind()
envelope.byteBuffer.put(envelopeTemplateBuffer)
@ -158,14 +158,14 @@ class CodecBenchmark {
encodeGraph = encoderInput
.via(encoder)
.map(envelope => envelopePool.release(envelope))
.map(envelope ⇒ envelopePool.release(envelope))
decodeGraph = decoderInput
.via(decoder)
.via(deserializer)
.map {
case env: ReusableInboundEnvelope => inboundEnvelopePool.release(env)
case _ =>
case env: ReusableInboundEnvelope ⇒ inboundEnvelopePool.release(env)
case _ ⇒
}
encodeDecodeGraph = encoderInput
@ -173,8 +173,8 @@ class CodecBenchmark {
.via(decoder)
.via(deserializer)
.map {
case env: ReusableInboundEnvelope => inboundEnvelopePool.release(env)
case _ =>
case env: ReusableInboundEnvelope ⇒ inboundEnvelopePool.release(env)
case _ ⇒
}
}

View file

@ -45,7 +45,7 @@ class FlightRecorderBench {
@OperationsPerInvocation(10000000)
def flight_recorder_writes(): Unit = {
val latch = new CountDownLatch(writers)
(1 to writers).foreach { _ =>
(1 to writers).foreach { _ ⇒
val sink = recorder.createEventSink()
new Thread {
override def run(): Unit = {

View file

@ -42,7 +42,7 @@ class LatchSink(countDownAfter: Int, latch: CountDownLatch) extends GraphStage[S
}
class BarrierSink(countDownAfter: Int, latch: CountDownLatch, barrierAfter: Int, barrier: CyclicBarrier)
extends GraphStage[SinkShape[Any]] {
extends GraphStage[SinkShape[Any]] {
val in: Inlet[Any] = Inlet("BarrierSink")
override val shape: SinkShape[Any] = SinkShape(in)

View file

@ -31,7 +31,7 @@ class CountMinSketchBenchmark {
@Setup
def init(): Unit = {
countMinSketch = new CountMinSketch(d, w, seed)
(0 to 8191).foreach { index =>
(0 to 8191).foreach { index ⇒
preallocateIds(index) = rand.nextInt()
preallocateValues(index) = Math.abs(rand.nextInt())
}

View file

@ -34,12 +34,12 @@ class EmptySourceBenchmark {
/*
(not serious benchmark, just sanity check: run on macbook 15, late 2013)
While it was a PublisherSource:
[info] EmptySourceBenchmark.empty thrpt 10 11.219 ± 6.498 ops/ms
Rewrite to GraphStage:
[info] EmptySourceBenchmark.empty thrpt 10 17.556 ± 2.865 ops/ms
*/
}

View file

@ -33,11 +33,11 @@ class FlatMapMergeBenchmark {
def setup(): Unit = {
val source = NumberOfStreams match {
// Base line: process NumberOfElements-many elements from a single source without using flatMapMerge
case 0 => createSource(NumberOfElements)
case 0 ⇒ createSource(NumberOfElements)
// Stream merging: process NumberOfElements-many elements from n sources, each producing (NumberOfElements/n)-many elements
case n =>
case n ⇒
val subSource = createSource(NumberOfElements / n)
Source.repeat(()).take(n).flatMapMerge(n, _ => subSource)
Source.repeat(()).take(n).flatMapMerge(n, _ ⇒ subSource)
}
graph = Source.fromGraph(source).toMat(Sink.ignore)(Keep.right)
}

View file

@ -128,7 +128,7 @@ class FlowMapBenchmark {
}
// source setup
private def mkMaps[O, Mat](source: Source[O, Mat], count: Int)(flow: => Graph[FlowShape[O, O], _]): Source[O, Mat] = {
private def mkMaps[O, Mat](source: Source[O, Mat], count: Int)(flow: ⇒ Graph[FlowShape[O, O], _]): Source[O, Mat] = {
var f = source
for (i ← 1 to count)
f = f.via(flow)

View file

@ -116,7 +116,7 @@ class FusedGraphsBenchmark {
materializer = ActorMaterializer(settings)
testElements = Array.fill(ElementCount)(new MutableElement(0))
val addFunc = (x: MutableElement) => { x.value += 1; x }
val addFunc = (x: MutableElement) ⇒ { x.value += 1; x }
val testSource = Source.fromGraph(new TestSource(testElements))
val testSink = Sink.fromGraph(new JitSafeCompletionLatch)
@ -179,7 +179,7 @@ class FusedGraphsBenchmark {
.take(ElementCount)
.map(addFunc)
.map(addFunc)
.fold(new MutableElement(0))((acc, x) => { acc.value += x.value; acc })
.fold(new MutableElement(0))((acc, x) ⇒ { acc.value += x.value; acc })
.toMat(testSink)(Keep.right)
)
@ -206,7 +206,7 @@ class FusedGraphsBenchmark {
.toMat(testSink)(Keep.right)
)
val broadcastZipFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b =>
val broadcastZipFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b ⇒
import GraphDSL.Implicits._
val bcast = b.add(Broadcast[MutableElement](2))
@ -218,7 +218,7 @@ class FusedGraphsBenchmark {
FlowShape(bcast.in, zip.out.map(_._1).outlet)
})
val balanceMergeFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b =>
val balanceMergeFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b ⇒
import GraphDSL.Implicits._
val balance = b.add(Balance[MutableElement](2))

View file

@ -35,7 +35,7 @@ class InterpreterBenchmark {
.connect(identities.last.out, sink)
// FIXME: This should not be here, this is pure setup overhead
for (i <- (0 until identities.size - 1)) {
for (i ← (0 until identities.size - 1)) {
b.connect(identities(i).out, identities(i + 1).in)
}

View file

@ -16,21 +16,21 @@ import akka.Done
object MaterializationBenchmark {
val flowWithMapBuilder = (numOfCombinators: Int) => {
val flowWithMapBuilder = (numOfCombinators: Int) ⇒ {
var source = Source.single(())
for (_ <- 1 to numOfCombinators) {
for (_ ← 1 to numOfCombinators) {
source = source.map(identity)
}
source.to(Sink.ignore)
}
val graphWithJunctionsGradualBuilder = (numOfJunctions: Int) =>
RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
val graphWithJunctionsGradualBuilder = (numOfJunctions: Int) ⇒
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
import GraphDSL.Implicits._
val broadcast = b.add(Broadcast[Unit](numOfJunctions))
var outlet = broadcast.out(0)
for (i <- 1 until numOfJunctions) {
for (i ← 1 until numOfJunctions) {
val merge = b.add(Merge[Unit](2))
outlet ~> merge
broadcast.out(i) ~> merge
@ -42,13 +42,13 @@ object MaterializationBenchmark {
ClosedShape
})
val graphWithJunctionsImmediateBuilder = (numOfJunctions: Int) =>
RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
val graphWithJunctionsImmediateBuilder = (numOfJunctions: Int) ⇒
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
import GraphDSL.Implicits._
val broadcast = b.add(Broadcast[Unit](numOfJunctions))
val merge = b.add(Merge[Unit](numOfJunctions))
for (i <- 0 until numOfJunctions) {
for (i ← 0 until numOfJunctions) {
broadcast ~> merge
}
@ -57,12 +57,12 @@ object MaterializationBenchmark {
ClosedShape
})
val graphWithImportedFlowBuilder = (numOfFlows: Int) =>
val graphWithImportedFlowBuilder = (numOfFlows: Int) ⇒
RunnableGraph.fromGraph(GraphDSL.create(Source.single(())) { implicit b ⇒ source ⇒
import GraphDSL.Implicits._
val flow = Flow[Unit].map(identity)
var out: Outlet[Unit] = source.out
for (i <- 0 until numOfFlows) {
for (i ← 0 until numOfFlows) {
val flowShape = b.add(flow)
out ~> flowShape
out = flowShape.outlet
@ -73,11 +73,11 @@ object MaterializationBenchmark {
final val subStreamCount = 10000
val subStreamBuilder: Int => RunnableGraph[Future[Unit]] = numOfCombinators => {
val subStreamBuilder: Int ⇒ RunnableGraph[Future[Unit]] = numOfCombinators ⇒ {
val subFlow = {
var flow = Flow[Unit]
for (_ <- 1 to numOfCombinators) {
for (_ ← 1 to numOfCombinators) {
flow = flow.map(identity)
}
flow

View file

@ -79,11 +79,11 @@ class PartitionHubBenchmark {
val source = testSource
.runWith(PartitionHub.sink[java.lang.Integer](
(size, elem) => elem.intValue % NumberOfStreams,
(size, elem) ⇒ elem.intValue % NumberOfStreams,
startAfterNrOfConsumers = NumberOfStreams, bufferSize = BufferSize
))(materializer)
for (_ <- 0 until NumberOfStreams)
for (_ ← 0 until NumberOfStreams)
source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer)
if (!latch.await(30, TimeUnit.SECONDS)) {
@ -106,7 +106,7 @@ class PartitionHubBenchmark {
))
)(materializer)
for (_ <- 0 until NumberOfStreams)
for (_ ← 0 until NumberOfStreams)
source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer)
if (!latch.await(30, TimeUnit.SECONDS)) {

View file

@ -34,7 +34,7 @@ class FileSourcesScaleBenchmark {
val FILES_NUMBER = 40
val files: Seq[Path] = {
val line = ByteString("x" * 2048 + "\n")
(1 to FILES_NUMBER).map(i => {
(1 to FILES_NUMBER).map(i {
val f = Files.createTempFile(getClass.getName, i + ".bench.tmp")
val ft = Source.fromIterator(() Iterator.continually(line))
@ -67,16 +67,16 @@ class FileSourcesScaleBenchmark {
@Benchmark
def flatMapMerge(): Unit = {
val h = Source.fromIterator(() => files.iterator)
.flatMapMerge(FILES_NUMBER, path => FileIO.fromPath(path, bufSize)).runWith(Sink.ignore)
val h = Source.fromIterator(() files.iterator)
.flatMapMerge(FILES_NUMBER, path FileIO.fromPath(path, bufSize)).runWith(Sink.ignore)
Await.result(h, 300.seconds)
}
@Benchmark
def mapAsync(): Unit = {
val h = Source.fromIterator(() => files.iterator)
.mapAsync(FILES_NUMBER)(path => FileIO.fromPath(path, bufSize).runWith(Sink.ignore)).runWith(Sink.ignore)
val h = Source.fromIterator(() files.iterator)
.mapAsync(FILES_NUMBER)(path FileIO.fromPath(path, bufSize).runWith(Sink.ignore)).runWith(Sink.ignore)
Await.result(h, 300.seconds)
}

View file

@ -36,8 +36,8 @@ class ByteString_copyToBuffer_Benchmark {
[info] ByteStringBenchmark.bs_large_copyToBuffer thrpt 40 142 163 289.866 ± 21751578.294 ops/s
[info] ByteStringBenchmark.bss_large_copyToBuffer thrpt 40 1 489 195.631 ± 209165.487 ops/s << that's the interesting case, we needlessly fold and allocate tons of Stream etc
[info] ByteStringBenchmark.bss_large_pc_copyToBuffer thrpt 40 184 466 756.364 ± 9169108.378 ops/s // "can't beat that"
[info] ....[Thread state: RUNNABLE]........................................................................
[info] 35.9% 35.9% scala.collection.Iterator$class.toStream
[info] 20.2% 20.2% scala.collection.immutable.Stream.foldLeft
@ -50,10 +50,10 @@ class ByteString_copyToBuffer_Benchmark {
[info] 1.2% 1.2% akka.util.generated.ByteStringBenchmark_bss_large_copyToBuffer.bss_large_copyToBuffer_thrpt_jmhStub
[info] 0.3% 0.3% akka.util.ByteIterator$MultiByteArrayIterator.copyToBuffer
[info] 1.2% 1.2% <other>
AFTER specializing impls
[info] ....[Thread state: RUNNABLE]........................................................................
[info] 99.5% 99.6% akka.util.generated.ByteStringBenchmark_bss_large_copyToBuffer_jmhTest.bss_large_copyToBuffer_thrpt_jmhStub
[info] 0.1% 0.1% java.util.concurrent.CountDownLatch.countDown
@ -63,12 +63,12 @@ class ByteString_copyToBuffer_Benchmark {
[info] 0.1% 0.1% java.lang.Thread.currentThread
[info] 0.1% 0.1% sun.misc.Unsafe.compareAndSwapInt
[info] 0.1% 0.1% sun.reflect.AccessorGenerator.internalize
[info] Benchmark Mode Cnt Score Error Units
[info] ByteStringBenchmark.bs_large_copyToBuffer thrpt 40 177 328 585.473 ± 7742067.648 ops/s
[info] ByteStringBenchmark.bss_large_copyToBuffer thrpt 40 113 535 003.488 ± 3899763.124 ops/s // previous bad case now very good (was 2M/s)
[info] ByteStringBenchmark.bss_large_pc_copyToBuffer thrpt 40 203 590 896.493 ± 7582752.024 ops/s // "can't beat that"
*/
@Benchmark

View file

@ -25,17 +25,17 @@ class ByteString_decode_Benchmark {
/*
Using Charset helps a bit, but nothing impressive:
[info] ByteString_decode_Benchmark.bc_large_decodeString_stringCharset_utf8 thrpt 20 21 612.293 ± 825.099 ops/s
=>
[info] ByteString_decode_Benchmark.bc_large_decodeString_charsetCharset_utf8 thrpt 20 22 473.372 ± 851.597 ops/s
[info] ByteString_decode_Benchmark.bs_large_decodeString_stringCharset_utf8 thrpt 20 84 443.674 ± 3723.987 ops/s
=>
[info] ByteString_decode_Benchmark.bs_large_decodeString_charsetCharset_utf8 thrpt 20 93 865.033 ± 2052.476 ops/s
[info] ByteString_decode_Benchmark.bss_large_decodeString_stringCharset_utf8 thrpt 20 14 886.553 ± 326.752 ops/s
=>
[info] ByteString_decode_Benchmark.bss_large_decodeString_charsetCharset_utf8 thrpt 20 16 031.670 ± 474.565 ops/s

View file

@ -27,7 +27,7 @@ class ByteString_dropSliceTake_Benchmark {
val bss_pc_large = bss_large.compact
/*
--------------------------------- BASELINE --------------------------------------------------------------------
--------------------------------- BASELINE --------------------------------------------------------------------
[info] Benchmark Mode Cnt Score Error Units
[info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_100 thrpt 20 111 122 621.983 ± 6172679.160 ops/s
[info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_256 thrpt 20 110 238 003.870 ± 4042572.908 ops/s
@ -48,9 +48,9 @@ class ByteString_dropSliceTake_Benchmark {
[info] ByteString_dropSliceTake_Benchmark.bs_large_slice_129_129 thrpt 20 105 640 836.625 ± 9112709.942 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_slice_80_80 thrpt 20 10 868 202.262 ± 526537.133 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_slice_129_129 thrpt 20 9 429 199.802 ± 1321542.453 ops/s
--------------------------------- AFTER -----------------------------------------------------------------------
------ TODAY
[info] Benchmark Mode Cnt Score Error Units
[info] ByteString_dropSliceTake_Benchmark.bs_large_dropRight_100 thrpt 20 126 091 961.654 ± 2813125.268 ops/s
@ -59,7 +59,7 @@ class ByteString_dropSliceTake_Benchmark {
[info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_100 thrpt 20 8 813 065.392 ± 234570.880 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_256 thrpt 20 9 039 585.934 ± 297168.301 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_dropRight_2000 thrpt 20 9 629 458.168 ± 124846.904 ops/s
[info] ByteString_dropSliceTake_Benchmark.bs_large_drop_100 thrpt 20 111 666 137.955 ± 4846727.674 ops/s
[info] ByteString_dropSliceTake_Benchmark.bs_large_drop_256 thrpt 20 114 405 514.622 ± 4985750.805 ops/s
[info] ByteString_dropSliceTake_Benchmark.bs_large_drop_2000 thrpt 20 114 364 716.297 ± 2512280.603 ops/s
@ -67,15 +67,15 @@ class ByteString_dropSliceTake_Benchmark {
[info] ByteString_dropSliceTake_Benchmark.bss_large_drop_100 thrpt 20 9 184 934.769 ± 549140.840 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_drop_256 thrpt 20 10 887 437.121 ± 195606.240 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_drop_2000 thrpt 20 10 725 300.292 ± 403470.413 ops/s
[info] ByteString_dropSliceTake_Benchmark.bs_large_slice_80_80 thrpt 20 233 017 314.148 ± 7070246.826 ops/s
[info] ByteString_dropSliceTake_Benchmark.bs_large_slice_129_129 thrpt 20 275 245 086.247 ± 4969752.048 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_slice_80_80 thrpt 20 264 963 420.976 ± 4259289.143 ops/s
[info] ByteString_dropSliceTake_Benchmark.bss_large_slice_129_129 thrpt 20 265 477 577.022 ± 4623974.283 ops/s
*/
// 18 == "http://example.com", a typical url length
// 18 == "http://example.com", a typical url length
@Benchmark
def bs_large_drop_0: ByteString =

View file

@ -48,7 +48,7 @@ class ImmutableIntMapBench {
} else from
}
val odd1000 = (0 to 1000).iterator.filter(_ % 2 == 1).foldLeft(ImmutableIntMap.empty)((l, i) => l.updated(i, i))
val odd1000 = (0 to 1000).iterator.filter(_ % 2 == 1).foldLeft(ImmutableIntMap.empty)((l, i) l.updated(i, i))
@Benchmark
@OperationsPerInvocation(1)
@ -109,4 +109,4 @@ class ImmutableIntMapBench {
@Benchmark
@OperationsPerInvocation(1000)
def getHiElement(): ImmutableIntMap = getKey(iterations = 1000, key = 999, from = odd1000)
}
}

View file

@ -45,7 +45,7 @@ class LruBoundedCacheBench {
}
// Loading
for (i <- 1 to threshold) {
for (i 1 to threshold) {
val value = random.nextString(stringSize)
if (i == 1) toGet = value
toRemove = value

View file

@ -281,9 +281,9 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
start(typeName, entityProps, settings,
extractEntityId = {
case msg if messageExtractor.entityId(msg) ne null
(messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
},
case msg if messageExtractor.entityId(msg) ne null
(messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
},
extractShardId = msg messageExtractor.shardId(msg),
allocationStrategy = allocationStrategy,
handOffStopMessage = handOffStopMessage)
@ -438,9 +438,9 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)),
extractEntityId = {
case msg if messageExtractor.entityId(msg) ne null
(messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
},
case msg if messageExtractor.entityId(msg) ne null
(messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
},
extractShardId = msg messageExtractor.shardId(msg))
}

View file

@ -121,4 +121,4 @@ class DistributedPubSubMediatorNotSendingToDeadLettersSpec
}
}
}
}
}

View file

@ -134,7 +134,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg
}
def addMember(m: Member): Unit =
if (m.uniqueAddress != selfUniqueAddress && // is not self
if (m.uniqueAddress != selfUniqueAddress && // is not self
!state.contains(m.uniqueAddress) && // not already added
filterInternalClusterMembers(m) // should be watching members from this DC (internal / external)
) {

View file

@ -321,20 +321,20 @@ private[cluster] object CrossDcHeartbeatingState {
crossDcFailureDetector,
nrOfMonitoredNodesPerDc,
state = {
// TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc
val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter)
// TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc
val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter)
if (members.ordering == Member.ageOrdering) {
// we already have the right ordering
groupedByDc
} else {
// we need to enforce the ageOrdering for the SortedSet in each DC
groupedByDc.map {
case (dc, ms)
dc (SortedSet.empty[Member](Member.ageOrdering) union ms)
if (members.ordering == Member.ageOrdering) {
// we already have the right ordering
groupedByDc
} else {
// we need to enforce the ageOrdering for the SortedSet in each DC
groupedByDc.map {
case (dc, ms)
dc (SortedSet.empty[Member](Member.ageOrdering) union ms)
}
}
}
})
})
}
}

View file

@ -532,4 +532,4 @@ private[akka] trait ClusterRouterActor { this: RouterActor ⇒
case ReachableMember(m)
if (isAvailable(m)) addMember(m)
}
}
}

View file

@ -343,7 +343,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
case cause: Exception
throw new AssertionError(s"Member ${toBeRemovedAddress} was not removed within ${timeout}!", cause)
}
awaiter ! PoisonPill // you've done your job, now die
awaiter ! PoisonPill // you've done your job, now die
enterBarrier("member-shutdown")
markNodeAsUnavailable(toBeRemovedAddress)

View file

@ -133,9 +133,9 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
val router = system.actorOf(
ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = false, useRoles = roles)).props,
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = false, useRoles = roles)).props,
"router-2b")
awaitAssert(currentRoutees(router).size should ===(4))
@ -193,9 +193,9 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
val router = system.actorOf(
ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRoles = roles)).props,
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRoles = roles)).props,
"router-3b")
awaitAssert(currentRoutees(router).size should ===(4))
@ -253,9 +253,9 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
val router = system.actorOf(
ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRoles = roles)).props,
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRoles = roles)).props,
"router-4b")
awaitAssert(currentRoutees(router).size should ===(2))
@ -313,9 +313,9 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
val router = system.actorOf(
ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRoles = roles)).props,
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRoles = roles)).props,
"router-5b")
awaitAssert(currentRoutees(router).size should ===(6))

View file

@ -249,4 +249,4 @@ akka.loglevel=DEBUG
}
}
}
}

View file

@ -21,8 +21,8 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen {
callTimeout = 200 millis,
resetTimeout = 1 second,
failureDetector = {
_ == "FAILURE"
})
_ == "FAILURE"
})
trait CircuitBreakerScenario {
val sender = TestProbe()

View file

@ -64,11 +64,11 @@ class CircuitBreaker(potentiallyFailingService: ActorRef) extends Actor with Act
CircuitBreakerPropsBuilder(maxFailures = 3, callTimeout = 2.seconds, resetTimeout = 30.seconds)
.copy(
failureDetector = {
_ match {
case Response(Left(_)) true
case _ false
}
})
_ match {
case Response(Left(_)) true
case _ false
}
})
.props(potentiallyFailingService),
"serviceCircuitBreaker")
@ -106,15 +106,15 @@ class CircuitBreakerAsk(potentiallyFailingService: ActorRef) extends Actor with
CircuitBreakerPropsBuilder(maxFailures = 3, callTimeout = askTimeout, resetTimeout = 30.seconds)
.copy(
failureDetector = {
_ match {
case Response(Left(_)) true
case _ false
}
})
_ match {
case Response(Left(_)) true
case _ false
}
})
.copy(
openCircuitFailureConverter = { failure
Left(s"Circuit open when processing ${failure.failedMsg}")
})
Left(s"Circuit open when processing ${failure.failedMsg}")
})
.props(potentiallyFailingService),
"serviceCircuitBreaker")

View file

@ -246,7 +246,7 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit
override def unhandled(message: Any) = probeRef ! message
}))
// 11 ( -> not handled by EvenHalverInterceptor) -> 22 but > 10 so not handled in main receive:
// 11 ( -> not handled by EvenHalverInterceptor) -> 22 but > 10 so not handled in main receive:
// original message falls back to unhandled implementation...
replier ! 11
probe.expectMsg(11)
@ -505,4 +505,4 @@ object AfterSamples {
}
}
//#interceptor-after
}
}

View file

@ -9,7 +9,7 @@ object Key {
*/
def unapply(k: Key[_]): Option[String] = Some(k.id)
private[akka]type KeyR = Key[ReplicatedData]
private[akka] type KeyR = Key[ReplicatedData]
type KeyId = String

View file

@ -36,7 +36,7 @@ object ORSet {
/**
* INTERNAL API
*/
@InternalApi private[akka]type Dot = VersionVector
@InternalApi private[akka] type Dot = VersionVector
sealed trait DeltaOp extends ReplicatedDelta with RequiresCausalDeliveryOfDeltas with ReplicatedDataSerialization {
type T = DeltaOp

View file

@ -9,5 +9,5 @@ trait CompileOnlySpec {
* Given a block of code... does NOT execute it.
* Useful when writing code samples in tests, which should only be compiled.
*/
def compileOnlySpec(body: => Unit) = ()
def compileOnlySpec(body: Unit) = ()
}

View file

@ -30,8 +30,8 @@ class MyActor extends Actor {
val log = Logging(context.system, this)
def receive = {
case "test" => log.info("received test")
case _ => log.info("received unknown message")
case "test" log.info("received test")
case _ log.info("received unknown message")
}
}
//#my-actor
@ -44,20 +44,20 @@ class FirstActor extends Actor {
val child = context.actorOf(Props[MyActor], name = "myChild")
//#plus-some-behavior
def receive = {
case x => sender() ! x
case x sender() ! x
}
//#plus-some-behavior
}
//#context-actorOf
class ActorWithArgs(arg: String) extends Actor {
def receive = { case _ => () }
def receive = { case _ () }
}
//#actor-with-value-class-argument
class Argument(val value: String) extends AnyVal
class ValueClassActor(arg: Argument) extends Actor {
def receive = { case _ => () }
def receive = { case _ () }
}
object ValueClassActor {
@ -82,7 +82,7 @@ class DemoActorWrapper extends Actor {
class DemoActor(magicNumber: Int) extends Actor {
def receive = {
case x: Int => sender() ! (x + magicNumber)
case x: Int sender() ! (x + magicNumber)
}
}
@ -92,7 +92,7 @@ class DemoActorWrapper extends Actor {
// ...
//#props-factory
def receive = {
case msg =>
case msg
}
//#props-factory
}
@ -110,8 +110,8 @@ class ActorWithMessagesWrapper {
class MyActor extends Actor with ActorLogging {
import MyActor._
def receive = {
case Greeting(greeter) => log.info(s"I was greeted by $greeter.")
case Goodbye => log.info("Someone said goodbye to me.")
case Greeting(greeter) log.info(s"I was greeted by $greeter.")
case Goodbye log.info("Someone said goodbye to me.")
}
}
//#messages-in-companion
@ -138,13 +138,13 @@ class Hook extends Actor {
class ReplyException extends Actor {
def receive = {
case _ =>
case _
//#reply-exception
try {
val result = operation()
sender() ! result
} catch {
case e: Exception =>
case e: Exception
sender() ! akka.actor.Status.Failure(e)
throw e
}
@ -162,10 +162,10 @@ class StoppingActorsWrapper {
val child: ActorRef = ???
def receive = {
case "interrupt-child" =>
case "interrupt-child"
context stop child
case "done" =>
case "done"
context stop self
}
@ -184,15 +184,15 @@ class Manager extends Actor {
val worker = context.watch(context.actorOf(Props[Cruncher], "worker"))
def receive = {
case "job" => worker ! "crunch"
case Shutdown =>
case "job" worker ! "crunch"
case Shutdown
worker ! PoisonPill
context become shuttingDown
}
def shuttingDown: Receive = {
case "job" => sender() ! "service unavailable, shutting down"
case Terminated(`worker`) =>
case "job" sender() ! "service unavailable, shutting down"
case Terminated(`worker`)
context stop self
}
}
@ -200,7 +200,7 @@ class Manager extends Actor {
class Cruncher extends Actor {
def receive = {
case "crunch" => // crunch...
case "crunch" // crunch...
}
}
@ -211,10 +211,10 @@ class Swapper extends Actor {
val log = Logging(system, this)
def receive = {
case Swap =>
case Swap
log.info("Hi")
become({
case Swap =>
case Swap
log.info("Ho")
unbecome() // resets the latest 'become' (just for fun)
}, discardOld = false) // push on top instead of replace
@ -236,22 +236,22 @@ object SwapperApp extends App {
//#receive-orElse
trait ProducerBehavior {
this: Actor =>
this: Actor
val producerBehavior: Receive = {
case GiveMeThings =>
case GiveMeThings
sender() ! Give("thing")
}
}
trait ConsumerBehavior {
this: Actor with ActorLogging =>
this: Actor with ActorLogging
val consumerBehavior: Receive = {
case ref: ActorRef =>
case ref: ActorRef
ref ! GiveMeThings
case Give(thing) =>
case Give(thing)
log.info("Got a thing! It's {}", thing)
}
}
@ -288,7 +288,7 @@ class Pinger extends Actor {
var countDown = 100
def receive = {
case Pong =>
case Pong
println(s"${self.path} received pong, count down $countDown")
if (countDown > 0) {
@ -303,7 +303,7 @@ class Pinger extends Actor {
class Ponger(pinger: ActorRef) extends Actor {
def receive = {
case Ping =>
case Ping
println(s"${self.path} received ping")
pinger ! Pong
}
@ -330,7 +330,7 @@ class ActorDocSpec extends AkkaSpec("""
import context._
val myActor = actorOf(Props[MyActor], name = "myactor")
def receive = {
case x => myActor ! x
case x myActor ! x
}
}
//#import-context
@ -347,17 +347,17 @@ class ActorDocSpec extends AkkaSpec("""
// TODO: convert docs to AkkaSpec(Map(...))
val filter = EventFilter.custom {
case e: Logging.Info => true
case _ => false
case e: Logging.Info true
case _ false
}
system.eventStream.publish(TestEvent.Mute(filter))
system.eventStream.subscribe(testActor, classOf[Logging.Info])
myActor ! "test"
expectMsgPF(1 second) { case Logging.Info(_, _, "received test") => true }
expectMsgPF(1 second) { case Logging.Info(_, _, "received test") true }
myActor ! "unknown"
expectMsgPF(1 second) { case Logging.Info(_, _, "received unknown message") => true }
expectMsgPF(1 second) { case Logging.Info(_, _, "received unknown message") true }
system.eventStream.unsubscribe(testActor)
system.eventStream.publish(TestEvent.UnMute(filter))
@ -436,8 +436,8 @@ class ActorDocSpec extends AkkaSpec("""
"creating actor with IndirectActorProducer" in {
class Echo(name: String) extends Actor {
def receive = {
case n: Int => sender() ! name
case message =>
case n: Int sender() ! name
case message
val target = testActor
//#forward
target forward message
@ -514,10 +514,10 @@ class ActorDocSpec extends AkkaSpec("""
// To set an initial delay
context.setReceiveTimeout(30 milliseconds)
def receive = {
case "Hello" =>
case "Hello"
// To set in a response to a message
context.setReceiveTimeout(100 milliseconds)
case ReceiveTimeout =>
case ReceiveTimeout
// To turn it off
context.setReceiveTimeout(Duration.Undefined)
throw new RuntimeException("Receive timed out")
@ -530,18 +530,18 @@ class ActorDocSpec extends AkkaSpec("""
class HotSwapActor extends Actor {
import context._
def angry: Receive = {
case "foo" => sender() ! "I am already angry?"
case "bar" => become(happy)
case "foo" sender() ! "I am already angry?"
case "bar" become(happy)
}
def happy: Receive = {
case "bar" => sender() ! "I am already happy :-)"
case "foo" => become(angry)
case "bar" sender() ! "I am already happy :-)"
case "foo" become(angry)
}
def receive = {
case "foo" => become(angry)
case "bar" => become(happy)
case "foo" become(angry)
case "bar" become(happy)
}
}
//#hot-swap-actor
@ -555,16 +555,16 @@ class ActorDocSpec extends AkkaSpec("""
import akka.actor.Stash
class ActorWithProtocol extends Actor with Stash {
def receive = {
case "open" =>
case "open"
unstashAll()
context.become({
case "write" => // do writing...
case "close" =>
case "write" // do writing...
case "close"
unstashAll()
context.unbecome()
case msg => stash()
case msg stash()
}, discardOld = false) // stack on top instead of replacing
case msg => stash()
case msg stash()
}
}
//#stash
@ -581,9 +581,9 @@ class ActorDocSpec extends AkkaSpec("""
var lastSender = context.system.deadLetters
def receive = {
case "kill" =>
case "kill"
context.stop(child); lastSender = sender()
case Terminated(`child`) => lastSender ! "finished"
case Terminated(`child`) lastSender ! "finished"
}
}
//#watch
@ -606,7 +606,7 @@ class ActorDocSpec extends AkkaSpec("""
victim ! Kill
expectMsgPF(hint = "expecting victim to terminate") {
case Terminated(v) if v == victim => v // the Actor has indeed terminated
case Terminated(v) if v == victim v // the Actor has indeed terminated
}
//#kill
}
@ -640,15 +640,15 @@ class ActorDocSpec extends AkkaSpec("""
context.actorSelection("/user/another") ! Identify(identifyId)
def receive = {
case ActorIdentity(`identifyId`, Some(ref)) =>
case ActorIdentity(`identifyId`, Some(ref))
context.watch(ref)
context.become(active(ref))
case ActorIdentity(`identifyId`, None) => context.stop(self)
case ActorIdentity(`identifyId`, None) context.stop(self)
}
def active(another: ActorRef): Actor.Receive = {
case Terminated(`another`) => context.stop(self)
case Terminated(`another`) context.stop(self)
}
}
//#identify
@ -673,7 +673,7 @@ class ActorDocSpec extends AkkaSpec("""
// the actor has been stopped
} catch {
// the actor wasn't stopped within 5 seconds
case e: akka.pattern.AskTimeoutException =>
case e: akka.pattern.AskTimeoutException
}
//#gracefulStop
}
@ -690,9 +690,9 @@ class ActorDocSpec extends AkkaSpec("""
val f: Future[Result] =
for {
x <- ask(actorA, Request).mapTo[Int] // call pattern directly
s <- (actorB ask Request).mapTo[String] // call by implicit conversion
d <- (actorC ? Request).mapTo[Double] // call by symbolic name
x ask(actorA, Request).mapTo[Int] // call pattern directly
s (actorB ask Request).mapTo[String] // call by implicit conversion
d (actorC ? Request).mapTo[Double] // call by symbolic name
} yield Result(x, s, d)
f pipeTo actorD // .. or ..
@ -702,12 +702,12 @@ class ActorDocSpec extends AkkaSpec("""
class Replier extends Actor {
def receive = {
case ref: ActorRef =>
case ref: ActorRef
//#reply-with-sender
sender().tell("reply", context.parent) // replies will go back to parent
sender().!("reply")(context.parent) // alternative syntax (beware of the parens!)
//#reply-with-sender
case x =>
case x
//#reply-without-sender
sender() ! x // replies will go to this actor
//#reply-without-sender
@ -730,8 +730,8 @@ class ActorDocSpec extends AkkaSpec("""
"using ActorDSL outside of akka.actor package" in {
import akka.actor.ActorDSL._
actor(new Act {
superviseWith(OneForOneStrategy() { case _ => Stop; Restart; Resume; Escalate })
superviseWith(AllForOneStrategy() { case _ => Stop; Restart; Resume; Escalate })
superviseWith(OneForOneStrategy() { case _ Stop; Restart; Resume; Escalate })
superviseWith(AllForOneStrategy() { case _ Stop; Restart; Resume; Escalate })
})
}
@ -739,12 +739,12 @@ class ActorDocSpec extends AkkaSpec("""
val someActor = system.actorOf(Props(classOf[Replier], this))
//#coordinated-shutdown-addTask
CoordinatedShutdown(system).addTask(
CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName") { () =>
import akka.pattern.ask
import system.dispatcher
implicit val timeout = Timeout(5.seconds)
(someActor ? "stop").map(_ => Done)
}
CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName") { ()
import akka.pattern.ask
import system.dispatcher
implicit val timeout = Timeout(5.seconds)
(someActor ? "stop").map(_ Done)
}
//#coordinated-shutdown-addTask
//#coordinated-shutdown-jvm-hook

View file

@ -11,7 +11,7 @@ import scala.concurrent.{ ExecutionContext, Future }
// #blocking-in-actor
class BlockingActor extends Actor {
def receive = {
case i: Int =>
case i: Int
Thread.sleep(5000) //block for 5 seconds, representing blocking I/O, etc
println(s"Blocking operation finished: ${i}")
}
@ -23,7 +23,7 @@ class BlockingFutureActor extends Actor {
implicit val executionContext: ExecutionContext = context.dispatcher
def receive = {
case i: Int =>
case i: Int
println(s"Calling blocking Future: ${i}")
Future {
Thread.sleep(5000) //block for 5 seconds
@ -38,7 +38,7 @@ class SeparateDispatcherFutureActor extends Actor {
implicit val executionContext: ExecutionContext = context.system.dispatchers.lookup("my-blocking-dispatcher")
def receive = {
case i: Int =>
case i: Int
println(s"Calling blocking Future: ${i}")
Future {
Thread.sleep(5000) //block for 5 seconds
@ -51,7 +51,7 @@ class SeparateDispatcherFutureActor extends Actor {
// #print-actor
class PrintActor extends Actor {
def receive = {
case i: Int =>
case i: Int
println(s"PrintActor: ${i}")
}
}
@ -66,7 +66,7 @@ object BlockingDispatcherSample {
val actor1 = system.actorOf(Props(new BlockingFutureActor))
val actor2 = system.actorOf(Props(new PrintActor))
for (i <- 1 to 100) {
for (i 1 to 100) {
actor1 ! i
actor2 ! i
}
@ -102,7 +102,7 @@ object SeparateDispatcherSample {
val actor1 = system.actorOf(Props(new SeparateDispatcherFutureActor))
val actor2 = system.actorOf(Props(new PrintActor))
for (i <- 1 to 100) {
for (i 1 to 100) {
actor1 ! i
actor2 ! i
}

View file

@ -5,7 +5,7 @@ package docs.actor
import language.postfixOps
import akka.testkit.{ AkkaSpec => MyFavoriteTestFrameWorkPlusAkkaTestKit }
import akka.testkit.{ AkkaSpec MyFavoriteTestFrameWorkPlusAkkaTestKit }
import akka.util.ByteString
//#test-code
@ -54,24 +54,24 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
//#when-syntax
when(Idle) {
case Event(SetTarget(ref), Uninitialized) =>
case Event(SetTarget(ref), Uninitialized)
stay using Todo(ref, Vector.empty)
}
//#when-syntax
//#transition-elided
onTransition {
case Active -> Idle =>
case Active -> Idle
stateData match {
case Todo(ref, queue) => ref ! Batch(queue)
case _ => // nothing to do
case Todo(ref, queue) ref ! Batch(queue)
case _ // nothing to do
}
}
//#transition-elided
//#when-syntax
when(Active, stateTimeout = 1 second) {
case Event(Flush | StateTimeout, t: Todo) =>
case Event(Flush | StateTimeout, t: Todo)
goto(Idle) using t.copy(queue = Vector.empty)
}
//#when-syntax
@ -79,10 +79,10 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
//#unhandled-elided
whenUnhandled {
// common code for both states
case Event(Queue(obj), t @ Todo(_, v)) =>
case Event(Queue(obj), t @ Todo(_, v))
goto(Active) using t.copy(queue = v :+ obj)
case Event(e, s) =>
case Event(e, s)
log.warning("received unhandled request {} in state {}/{}", e, stateName, s)
stay
}
@ -108,16 +108,16 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
//#modifier-syntax
when(SomeState) {
case Event(msg, _) =>
case Event(msg, _)
goto(Processing) using (newData) forMax (5 seconds) replying (WillDo)
}
//#modifier-syntax
//#transition-syntax
onTransition {
case Idle -> Active => setTimer("timeout", Tick, 1 second, repeat = true)
case Active -> _ => cancelTimer("timeout")
case x -> Idle => log.info("entering Idle from " + x)
case Idle -> Active setTimer("timeout", Tick, 1 second, repeat = true)
case Active -> _ cancelTimer("timeout")
case x -> Idle log.info("entering Idle from " + x)
}
//#transition-syntax
@ -131,7 +131,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
//#stop-syntax
when(Error) {
case Event("stop", _) =>
case Event("stop", _)
// do cleanup ...
stop()
}
@ -139,38 +139,38 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
//#transform-syntax
when(SomeState)(transform {
case Event(bytes: ByteString, read) => stay using (read + bytes.length)
case Event(bytes: ByteString, read) stay using (read + bytes.length)
} using {
case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 =>
case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000
goto(Processing)
})
//#transform-syntax
//#alt-transform-syntax
val processingTrigger: PartialFunction[State, State] = {
case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 =>
case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000
goto(Processing)
}
when(SomeState)(transform {
case Event(bytes: ByteString, read) => stay using (read + bytes.length)
case Event(bytes: ByteString, read) stay using (read + bytes.length)
} using processingTrigger)
//#alt-transform-syntax
//#termination-syntax
onTermination {
case StopEvent(FSM.Normal, state, data) => // ...
case StopEvent(FSM.Shutdown, state, data) => // ...
case StopEvent(FSM.Failure(cause), state, data) => // ...
case StopEvent(FSM.Normal, state, data) // ...
case StopEvent(FSM.Shutdown, state, data) // ...
case StopEvent(FSM.Failure(cause), state, data) // ...
}
//#termination-syntax
//#unhandled-syntax
whenUnhandled {
case Event(x: X, data) =>
case Event(x: X, data)
log.info("Received unhandled event: " + x)
stay
case Event(msg, _) =>
case Event(msg, _)
log.warning("Received unknown event: " + msg)
goto(Error)
}
@ -184,7 +184,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
//#body-elided
override def logDepth = 12
onTermination {
case StopEvent(FSM.Failure(_), state, data) =>
case StopEvent(FSM.Failure(_), state, data)
val lastEvents = getLog.mkString("\n\t")
log.warning("Failure in state " + state + " with data " + data + "\n" +
"Events leading up to this point:\n\t" + lastEvents)

View file

@ -49,14 +49,14 @@ class Listener extends Actor with ActorLogging {
context.setReceiveTimeout(15 seconds)
def receive = {
case Progress(percent) =>
case Progress(percent)
log.info("Current progress: {} %", percent)
if (percent >= 100.0) {
log.info("That's all, shutting down")
context.system.terminate()
}
case ReceiveTimeout =>
case ReceiveTimeout
// No progress within 15 seconds, ServiceUnavailable
log.error("Shutting down due to unavailable service")
context.system.terminate()
@ -83,7 +83,7 @@ class Worker extends Actor with ActorLogging {
// Stop the CounterService child if it throws ServiceUnavailable
override val supervisorStrategy = OneForOneStrategy() {
case _: CounterService.ServiceUnavailable => Stop
case _: CounterService.ServiceUnavailable Stop
}
// The sender of the initial Start message will continuously be notified
@ -94,18 +94,18 @@ class Worker extends Actor with ActorLogging {
import context.dispatcher // Use this Actors' Dispatcher as ExecutionContext
def receive = LoggingReceive {
case Start if progressListener.isEmpty =>
case Start if progressListener.isEmpty
progressListener = Some(sender())
context.system.scheduler.schedule(Duration.Zero, 1 second, self, Do)
case Do =>
case Do
counterService ! Increment(1)
counterService ! Increment(1)
counterService ! Increment(1)
// Send current progress to the initial sender
counterService ? GetCurrentCount map {
case CurrentCount(_, count) => Progress(100.0 * count / totalCount)
case CurrentCount(_, count) Progress(100.0 * count / totalCount)
} pipeTo progressListener.get
}
}
@ -137,7 +137,7 @@ class CounterService extends Actor {
override val supervisorStrategy = OneForOneStrategy(
maxNrOfRetries = 3,
withinTimeRange = 5 seconds) {
case _: Storage.StorageException => Restart
case _: Storage.StorageException Restart
}
val key = self.path.name
@ -168,21 +168,21 @@ class CounterService extends Actor {
def receive = LoggingReceive {
case Entry(k, v) if k == key && counter == None =>
case Entry(k, v) if k == key && counter == None
// Reply from Storage of the initial value, now we can create the Counter
val c = context.actorOf(Props(classOf[Counter], key, v))
counter = Some(c)
// Tell the counter to use current storage
c ! UseStorage(storage)
// and send the buffered backlog to the counter
for ((replyTo, msg) <- backlog) c.tell(msg, sender = replyTo)
for ((replyTo, msg) backlog) c.tell(msg, sender = replyTo)
backlog = IndexedSeq.empty
case msg: Increment => forwardOrPlaceInBacklog(msg)
case msg: Increment forwardOrPlaceInBacklog(msg)
case msg: GetCurrentCount => forwardOrPlaceInBacklog(msg)
case msg: GetCurrentCount forwardOrPlaceInBacklog(msg)
case Terminated(actorRef) if Some(actorRef) == storage =>
case Terminated(actorRef) if Some(actorRef) == storage
// After 3 restarts the storage child is stopped.
// We receive Terminated because we watch the child, see initStorage.
storage = None
@ -191,7 +191,7 @@ class CounterService extends Actor {
// Try to re-establish storage after while
context.system.scheduler.scheduleOnce(10 seconds, self, Reconnect)
case Reconnect =>
case Reconnect
// Re-establish storage after the scheduled delay
initStorage()
}
@ -201,8 +201,8 @@ class CounterService extends Actor {
// the counter. Before that we place the messages in a backlog, to be sent
// to the counter when it is initialized.
counter match {
case Some(c) => c forward msg
case None =>
case Some(c) c forward msg
case None
if (backlog.size >= MaxBacklog)
throw new ServiceUnavailable(
"CounterService not available, lack of initial value")
@ -232,15 +232,15 @@ class Counter(key: String, initialValue: Long) extends Actor {
var storage: Option[ActorRef] = None
def receive = LoggingReceive {
case UseStorage(s) =>
case UseStorage(s)
storage = s
storeCount()
case Increment(n) =>
case Increment(n)
count += n
storeCount()
case GetCurrentCount =>
case GetCurrentCount
sender() ! CurrentCount(key, count)
}
@ -273,8 +273,8 @@ class Storage extends Actor {
val db = DummyDB
def receive = LoggingReceive {
case Store(Entry(key, count)) => db.save(key, count)
case Get(key) => sender() ! Entry(key, db.load(key).getOrElse(0L))
case Store(Entry(key, count)) db.save(key, count)
case Get(key) sender() ! Entry(key, db.load(key).getOrElse(0L))
}
}

View file

@ -27,15 +27,15 @@ object FaultHandlingDocSpec {
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException => Resume
case _: NullPointerException => Restart
case _: IllegalArgumentException => Stop
case _: Exception => Escalate
case _: ArithmeticException Resume
case _: NullPointerException Restart
case _: IllegalArgumentException Stop
case _: Exception Escalate
}
//#strategy
def receive = {
case p: Props => sender() ! context.actorOf(p)
case p: Props sender() ! context.actorOf(p)
}
}
//#supervisor
@ -49,15 +49,15 @@ object FaultHandlingDocSpec {
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException => Resume
case _: NullPointerException => Restart
case _: IllegalArgumentException => Stop
case _: Exception => Escalate
case _: ArithmeticException Resume
case _: NullPointerException Restart
case _: IllegalArgumentException Stop
case _: Exception Escalate
}
//#strategy2
def receive = {
case p: Props => sender() ! context.actorOf(p)
case p: Props sender() ! context.actorOf(p)
}
// override default to kill all children during restart
override def preRestart(cause: Throwable, msg: Option[Any]) {}
@ -72,9 +72,9 @@ object FaultHandlingDocSpec {
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException => Resume
case t =>
super.supervisorStrategy.decider.applyOrElse(t, (_: Any) => Escalate)
case _: ArithmeticException Resume
case t
super.supervisorStrategy.decider.applyOrElse(t, (_: Any) Escalate)
}
//#default-strategy-fallback
@ -85,9 +85,9 @@ object FaultHandlingDocSpec {
class Child extends Actor {
var state = 0
def receive = {
case ex: Exception => throw ex
case x: Int => state = x
case "get" => sender() ! state
case ex: Exception throw ex
case x: Int state = x
case "get" sender() ! state
}
}
//#child
@ -146,7 +146,7 @@ class FaultHandlingDocSpec(_system: ActorSystem) extends TestKit(_system)
//#stop
watch(child) // have testActor watch child
child ! new IllegalArgumentException // break it
expectMsgPF() { case Terminated(`child`) => () }
expectMsgPF() { case Terminated(`child`) () }
//#stop
}
EventFilter[Exception]("CRASH", occurrences = 2) intercept {
@ -159,7 +159,7 @@ class FaultHandlingDocSpec(_system: ActorSystem) extends TestKit(_system)
child2 ! new Exception("CRASH") // escalate failure
expectMsgPF() {
case t @ Terminated(`child2`) if t.existenceConfirmed => ()
case t @ Terminated(`child2`) if t.existenceConfirmed ()
}
//#escalate-kill
//#escalate-restart

View file

@ -10,7 +10,7 @@ object InitializationDocSpec {
class PreStartInitExample extends Actor {
override def receive = {
case _ => // Ignore
case _ // Ignore
}
//#preStartInit
@ -37,14 +37,14 @@ object InitializationDocSpec {
var initializeMe: Option[String] = None
override def receive = {
case "init" =>
case "init"
initializeMe = Some("Up and running")
context.become(initialized, discardOld = true)
}
def initialized: Receive = {
case "U OK?" => initializeMe foreach { sender() ! _ }
case "U OK?" initializeMe foreach { sender() ! _ }
}
//#messageInit

View file

@ -17,7 +17,7 @@ class PropsEdgeCaseSpec extends WordSpec with CompileOnlySpec {
//#props-edge-cases-value-class-example
class ValueActor(value: MyValueClass) extends Actor {
def receive = {
case multiplier: Long => sender() ! (value.v * multiplier)
case multiplier: Long sender() ! (value.v * multiplier)
}
}
val valueClassProp = Props(classOf[ValueActor], MyValueClass(5)) // Unsupported
@ -26,7 +26,7 @@ class PropsEdgeCaseSpec extends WordSpec with CompileOnlySpec {
//#props-edge-cases-default-values
class DefaultValueActor(a: Int, b: Int = 5) extends Actor {
def receive = {
case x: Int => sender() ! ((a + x) * b)
case x: Int sender() ! ((a + x) * b)
}
}
@ -34,7 +34,7 @@ class PropsEdgeCaseSpec extends WordSpec with CompileOnlySpec {
class DefaultValueActor2(b: Int = 5) extends Actor {
def receive = {
case x: Int => sender() ! (x * b)
case x: Int sender() ! (x * b)
}
}
val defaultValueProp2 = Props[DefaultValueActor2] // Unsupported

View file

@ -43,7 +43,7 @@ class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
val Tick = "tick"
class TickActor extends Actor {
def receive = {
case Tick => //Do something
case Tick //Do something
}
}
val tickActor = system.actorOf(Props(classOf[TickActor], this))

View file

@ -18,13 +18,13 @@ class SharedMutableStateDocSpec {
class EchoActor extends Actor {
def receive = {
case msg => sender() ! msg
case msg sender() ! msg
}
}
class CleanUpActor extends Actor {
def receive = {
case set: mutable.Set[_] => set.clear()
case set: mutable.Set[_] set.clear()
}
}
@ -43,7 +43,7 @@ class SharedMutableStateDocSpec {
}
def receive = {
case _ =>
case _
implicit val ec = context.dispatcher
implicit val timeout = Timeout(5 seconds) // needed for `?` below
@ -52,7 +52,7 @@ class SharedMutableStateDocSpec {
// application to break in weird ways
Future { state = "This will race" }
((echoActor ? Message("With this other one")).mapTo[Message])
.foreach { received => state = received.msg }
.foreach { received state = received.msg }
// Very bad: shared mutable object allows
// the other actor to mutate your own state,

View file

@ -22,10 +22,10 @@ object TimerDocSpec {
timers.startSingleTimer(TickKey, FirstTick, 500.millis)
def receive = {
case FirstTick =>
case FirstTick
// do something useful here
timers.startPeriodicTimer(TickKey, Tick, 1.second)
case Tick =>
case Tick
// do something useful here
}
}

View file

@ -4,7 +4,7 @@
package docs.actor
//#imports
import java.lang.String.{ valueOf => println }
import java.lang.String.{ valueOf println }
import akka.actor.{ ActorContext, ActorRef, TypedActor, TypedProps }
import akka.routing.RoundRobinGroup
@ -15,7 +15,7 @@ import scala.concurrent.duration._
//#imports
//Mr funny man avoids printing to stdout AND keeping docs alright
import java.lang.String.{ valueOf => println }
import java.lang.String.{ valueOf println }
//#typed-actor-iface
trait Squarer {
@ -111,7 +111,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
//#typed-actor-extension-tools
} catch {
case e: Exception => //dun care
case e: Exception //dun care
}
}
@ -181,7 +181,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
//Use "childSquarer" as a Squarer
//#typed-actor-hierarchy
} catch {
case e: Exception => //ignore
case e: Exception //ignore
}
}
@ -204,7 +204,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
// prepare routees
val routees: List[HasName] = List.fill(5) { namedActor() }
val routeePaths = routees map { r =>
val routeePaths = routees map { r
TypedActor(system).getActorRefFor(r).path.toStringWithoutAddress
}

View file

@ -34,16 +34,16 @@ class UnnestedReceives extends Actor {
}
def receive = {
case 'Replay => //Our first message should be a 'Replay message, all others are invalid
case 'Replay //Our first message should be a 'Replay message, all others are invalid
allOldMessages() foreach process //Process all old messages/events
become { //Switch behavior to look for the GoAhead signal
case 'GoAhead => //When we get the GoAhead signal we process all our buffered messages/events
case 'GoAhead //When we get the GoAhead signal we process all our buffered messages/events
queue foreach process
queue.clear
become { //Then we change behaviour to process incoming messages/events as they arrive
case msg => process(msg)
case msg process(msg)
}
case msg => //While we haven't gotten the GoAhead signal, buffer all incoming messages
case msg //While we haven't gotten the GoAhead signal, buffer all incoming messages
queue += msg //Here you have full control, you can handle overflow etc
}
}

View file

@ -59,7 +59,7 @@ class AgentDocSpec extends AkkaSpec {
agent send (_ * 2)
//#send
def longRunningOrBlockingFunction = (i: Int) => i * 1 // Just for the example code
def longRunningOrBlockingFunction = (i: Int) i * 1 // Just for the example code
def someExecutionContext() = scala.concurrent.ExecutionContext.Implicits.global // Just for the example code
//#send-off
// the ExecutionContext you want to run the function on
@ -82,7 +82,7 @@ class AgentDocSpec extends AkkaSpec {
val f3: Future[Int] = agent alter (_ * 2)
//#alter
def longRunningOrBlockingFunction = (i: Int) => i * 1 // Just for the example code
def longRunningOrBlockingFunction = (i: Int) i * 1 // Just for the example code
def someExecutionContext() = ExecutionContext.global // Just for the example code
//#alter-off
@ -103,7 +103,7 @@ class AgentDocSpec extends AkkaSpec {
import scala.concurrent.stm._
def transfer(from: Agent[Int], to: Agent[Int], amount: Int): Boolean = {
atomic { txn =>
atomic { txn
if (from.get < amount) false
else {
from send (_ - amount)
@ -134,19 +134,19 @@ class AgentDocSpec extends AkkaSpec {
val agent2 = Agent(5)
// uses foreach
for (value <- agent1)
for (value agent1)
println(value)
// uses map
val agent3 = for (value <- agent1) yield value + 1
val agent3 = for (value agent1) yield value + 1
// or using map directly
val agent4 = agent1 map (_ + 1)
// uses flatMap
val agent5 = for {
value1 <- agent1
value2 <- agent2
value1 agent1
value2 agent2
} yield value1 + value2
//#monadic-example

View file

@ -15,7 +15,7 @@ object Consumers {
def endpointUri = "file:data/input/actor"
def receive = {
case msg: CamelMessage => println("received %s" format msg.bodyAs[String])
case msg: CamelMessage println("received %s" format msg.bodyAs[String])
}
}
//#Consumer1
@ -28,7 +28,7 @@ object Consumers {
def endpointUri = "jetty:http://localhost:8877/camel/default"
def receive = {
case msg: CamelMessage => sender() ! ("Hello %s" format msg.bodyAs[String])
case msg: CamelMessage sender() ! ("Hello %s" format msg.bodyAs[String])
}
}
//#Consumer2
@ -45,7 +45,7 @@ object Consumers {
def endpointUri = "jms:queue:test"
def receive = {
case msg: CamelMessage =>
case msg: CamelMessage
sender() ! Ack
// on success
// ..
@ -65,7 +65,7 @@ object Consumers {
def endpointUri = "jetty:http://localhost:8877/camel/default"
override def replyTimeout = 500 millis
def receive = {
case msg: CamelMessage => sender() ! ("Hello %s" format msg.bodyAs[String])
case msg: CamelMessage sender() ! ("Hello %s" format msg.bodyAs[String])
}
}
//#Consumer4

View file

@ -18,9 +18,9 @@ object CustomRoute {
import akka.camel._
class Responder extends Actor {
def receive = {
case msg: CamelMessage =>
case msg: CamelMessage
sender() ! (msg.mapBody {
body: String => "received %s" format body
body: String "received %s" format body
})
}
}
@ -47,9 +47,9 @@ object CustomRoute {
class ErrorThrowingConsumer(override val endpointUri: String) extends Consumer {
def receive = {
case msg: CamelMessage => throw new Exception("error: %s" format msg.body)
case msg: CamelMessage throw new Exception("error: %s" format msg.body)
}
override def onRouteDefinition = (rd) => rd.onException(classOf[Exception]).
override def onRouteDefinition = (rd) rd.onException(classOf[Exception]).
handled(true).transform(Builder.exceptionMessage).end
final override def preRestart(reason: Throwable, message: Option[Any]) {

View file

@ -17,8 +17,8 @@ object Introduction {
def endpointUri = "mina2:tcp://localhost:6200?textline=true"
def receive = {
case msg: CamelMessage => { /* ... */ }
case _ => { /* ... */ }
case msg: CamelMessage { /* ... */ }
case _ { /* ... */ }
}
}
@ -37,8 +37,8 @@ object Introduction {
def endpointUri = "jetty:http://localhost:8877/example"
def receive = {
case msg: CamelMessage => { /* ... */ }
case _ => { /* ... */ }
case msg: CamelMessage { /* ... */ }
case _ { /* ... */ }
}
}
//#Consumer
@ -87,8 +87,8 @@ object Introduction {
def endpointUri = "mina2:tcp://localhost:6200?textline=true"
def receive = {
case msg: CamelMessage => { /* ... */ }
case _ => { /* ... */ }
case msg: CamelMessage { /* ... */ }
case _ { /* ... */ }
}
}
val system = ActorSystem("some-system")

View file

@ -33,7 +33,7 @@ object Producers {
class ResponseReceiver extends Actor {
def receive = {
case msg: CamelMessage =>
case msg: CamelMessage
// do something with the forwarded response
}
}
@ -61,11 +61,11 @@ object Producers {
def endpointUri = uri
def upperCase(msg: CamelMessage) = msg.mapBody {
body: String => body.toUpperCase
body: String body.toUpperCase
}
override def transformOutgoingMessage(msg: Any) = msg match {
case msg: CamelMessage => upperCase(msg)
case msg: CamelMessage upperCase(msg)
}
}
//#TransformOutgoingMessage
@ -106,7 +106,7 @@ object Producers {
import akka.actor.Actor
class MyActor extends Actor {
def receive = {
case msg =>
case msg
val template = CamelExtension(context.system).template
template.sendBody("direct:news", msg)
}
@ -118,7 +118,7 @@ object Producers {
import akka.actor.Actor
class MyActor extends Actor {
def receive = {
case msg =>
case msg
val template = CamelExtension(context.system).template
sender() ! template.requestBody("direct:news", msg)
}
@ -126,4 +126,4 @@ object Producers {
//#RequestProducerTemplate
}
}
}

View file

@ -9,7 +9,7 @@ object PublishSubscribe {
def endpointUri = uri
def receive = {
case msg: CamelMessage => println("%s received: %s" format (name, msg.body))
case msg: CamelMessage println("%s received: %s" format (name, msg.body))
}
}
@ -25,7 +25,7 @@ object PublishSubscribe {
def endpointUri = uri
def receive = {
case msg: CamelMessage => {
case msg: CamelMessage {
publisher ! msg.bodyAs[String]
sender() ! ("message published")
}

View file

@ -36,9 +36,9 @@ class DangerousActor extends Actor with ActorLogging {
def dangerousCall: String = "This really isn't that dangerous of a call after all"
def receive = {
case "is my middle name" =>
case "is my middle name"
breaker.withCircuitBreaker(Future(dangerousCall)) pipeTo sender()
case "block for me" =>
case "block for me"
sender() ! breaker.withSyncCircuitBreaker(dangerousCall)
}
//#circuit-breaker-usage
@ -62,16 +62,16 @@ class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging {
import akka.actor.ReceiveTimeout
def receive = {
case "call" if breaker.isClosed => {
case "call" if breaker.isClosed {
recipient ! "message"
}
case "response" => {
case "response" {
breaker.succeed()
}
case err: Throwable => {
case err: Throwable {
breaker.fail()
}
case ReceiveTimeout => {
case ReceiveTimeout {
breaker.fail()
}
}
@ -82,9 +82,9 @@ class EvenNoFailureActor extends Actor {
import context.dispatcher
//#even-no-as-failure
def luckyNumber(): Future[Int] = {
val evenNumberAsFailure: Try[Int] => Boolean = {
case Success(n) => n % 2 == 0
case Failure(_) => true
val evenNumberAsFailure: Try[Int] Boolean = {
case Success(n) n % 2 == 0
case Failure(_) true
}
val breaker =
@ -100,6 +100,6 @@ class EvenNoFailureActor extends Actor {
//#even-no-as-failure
override def receive = {
case x: Int =>
case x: Int
}
}

View file

@ -15,8 +15,8 @@ class FactorialBackend extends Actor with ActorLogging {
import context.dispatcher
def receive = {
case (n: Int) =>
Future(factorial(n)) map { result => (n, result) } pipeTo sender()
case (n: Int)
Future(factorial(n)) map { result (n, result) } pipeTo sender()
}
def factorial(n: Int): BigInt = {
@ -43,4 +43,4 @@ object FactorialBackend {
system.actorOf(Props[MetricsListener], name = "metricsListener")
}
}
}

View file

@ -27,13 +27,13 @@ class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLog
}
def receive = {
case (n: Int, factorial: BigInt) =>
case (n: Int, factorial: BigInt)
if (n == upToN) {
log.debug("{}! = {}", n, factorial)
if (repeat) sendJobs()
else context.stop(self)
}
case ReceiveTimeout =>
case ReceiveTimeout
log.info("Timeout")
sendJobs()
}

View file

@ -23,24 +23,24 @@ class MetricsListener extends Actor with ActorLogging {
override def postStop(): Unit = extension.unsubscribe(self)
def receive = {
case ClusterMetricsChanged(clusterMetrics) =>
clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics =>
case ClusterMetricsChanged(clusterMetrics)
clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics
logHeap(nodeMetrics)
logCpu(nodeMetrics)
}
case state: CurrentClusterState => // Ignore.
case state: CurrentClusterState // Ignore.
}
def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
case HeapMemory(address, timestamp, used, committed, max) =>
case HeapMemory(address, timestamp, used, committed, max)
log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024)
case _ => // No heap info.
case _ // No heap info.
}
def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, cpuStolen, processors) =>
case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, cpuStolen, processors)
log.info("Load: {} ({} processors)", systemLoadAverage, processors)
case _ => // No cpu info.
case _ // No cpu info.
}
}
//#metrics-listener

View file

@ -19,14 +19,14 @@ class SimpleClusterListener extends Actor with ActorLogging {
override def postStop(): Unit = cluster.unsubscribe(self)
def receive = {
case MemberUp(member) =>
case MemberUp(member)
log.info("Member is Up: {}", member.address)
case UnreachableMember(member) =>
case UnreachableMember(member)
log.info("Member detected as unreachable: {}", member)
case MemberRemoved(member, previousStatus) =>
case MemberRemoved(member, previousStatus)
log.info(
"Member is Removed: {} after {}",
member.address, previousStatus)
case _: MemberEvent => // ignore
case _: MemberEvent // ignore
}
}

View file

@ -18,16 +18,16 @@ class SimpleClusterListener2 extends Actor with ActorLogging {
override def postStop(): Unit = cluster.unsubscribe(self)
def receive = {
case state: CurrentClusterState =>
case state: CurrentClusterState
log.info("Current members: {}", state.members.mkString(", "))
case MemberUp(member) =>
case MemberUp(member)
log.info("Member is Up: {}", member.address)
case UnreachableMember(member) =>
case UnreachableMember(member)
log.info("Member detected as unreachable: {}", member)
case MemberRemoved(member, previousStatus) =>
case MemberRemoved(member, previousStatus)
log.info(
"Member is Removed: {} after {}",
member.address, previousStatus)
case _: MemberEvent => // ignore
case _: MemberEvent // ignore
}
}

View file

@ -25,10 +25,10 @@ class TransformationBackend extends Actor {
override def postStop(): Unit = cluster.unsubscribe(self)
def receive = {
case TransformationJob(text) => sender() ! TransformationResult(text.toUpperCase)
case state: CurrentClusterState =>
case TransformationJob(text) sender() ! TransformationResult(text.toUpperCase)
case state: CurrentClusterState
state.members.filter(_.status == MemberStatus.Up) foreach register
case MemberUp(m) => register(m)
case MemberUp(m) register(m)
}
def register(member: Member): Unit =

View file

@ -19,18 +19,18 @@ class TransformationFrontend extends Actor {
var jobCounter = 0
def receive = {
case job: TransformationJob if backends.isEmpty =>
case job: TransformationJob if backends.isEmpty
sender() ! JobFailed("Service unavailable, try again later", job)
case job: TransformationJob =>
case job: TransformationJob
jobCounter += 1
backends(jobCounter % backends.size) forward job
case BackendRegistration if !backends.contains(sender()) =>
case BackendRegistration if !backends.contains(sender())
context watch sender()
backends = backends :+ sender()
case Terminated(a) =>
case Terminated(a)
backends = backends.filterNot(_ == a)
}
}
@ -52,7 +52,7 @@ object TransformationFrontend {
system.scheduler.schedule(2.seconds, 2.seconds) {
implicit val timeout = Timeout(5 seconds)
(frontend ? TransformationJob("hello-" + counter.incrementAndGet())) onSuccess {
case result => println(result)
case result println(result)
}
}

View file

@ -5,4 +5,4 @@ final case class TransformationJob(text: String)
final case class TransformationResult(text: String)
final case class JobFailed(reason: String, job: TransformationJob)
case object BackendRegistration
//#messages
//#messages

View file

@ -72,7 +72,7 @@ object DistributedDataDocSpec {
replicator ! Subscribe(DataKey, self)
def receive = {
case Tick =>
case Tick
val s = ThreadLocalRandom.current().nextInt(97, 123).toChar.toString
if (ThreadLocalRandom.current().nextBoolean()) {
// add
@ -84,9 +84,9 @@ object DistributedDataDocSpec {
replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ - s)
}
case _: UpdateResponse[_] => // ignore
case _: UpdateResponse[_] // ignore
case c @ Changed(DataKey) =>
case c @ Changed(DataKey)
val data = c.get(DataKey)
log.info("Current elements: {}", data.elements)
}
@ -128,19 +128,19 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) {
probe.expectMsgType[UpdateResponse[_]] match {
//#update-response1
case UpdateSuccess(Counter1Key, req) => // ok
case UpdateSuccess(Counter1Key, req) // ok
//#update-response1
case unexpected => fail("Unexpected response: " + unexpected)
case unexpected fail("Unexpected response: " + unexpected)
}
probe.expectMsgType[UpdateResponse[_]] match {
//#update-response2
case UpdateSuccess(Set1Key, req) => // ok
case UpdateTimeout(Set1Key, req) =>
case UpdateSuccess(Set1Key, req) // ok
case UpdateTimeout(Set1Key, req)
// write to 3 nodes failed within 1.second
//#update-response2
case UpdateSuccess(Set2Key, None) =>
case unexpected => fail("Unexpected response: " + unexpected)
case UpdateSuccess(Set2Key, None)
case unexpected fail("Unexpected response: " + unexpected)
}
}
@ -157,14 +157,14 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) {
val Counter1Key = PNCounterKey("counter1")
def receive: Receive = {
case "increment" =>
case "increment"
// incoming command to increase the counter
val upd = Update(Counter1Key, PNCounter(), writeTwo, request = Some(sender()))(_ + 1)
replicator ! upd
case UpdateSuccess(Counter1Key, Some(replyTo: ActorRef)) =>
case UpdateSuccess(Counter1Key, Some(replyTo: ActorRef))
replyTo ! "ack"
case UpdateTimeout(Counter1Key, Some(replyTo: ActorRef)) =>
case UpdateTimeout(Counter1Key, Some(replyTo: ActorRef))
replyTo ! "nack"
}
//#update-request-context
@ -195,24 +195,24 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) {
probe.expectMsgType[GetResponse[_]] match {
//#get-response1
case g @ GetSuccess(Counter1Key, req) =>
case g @ GetSuccess(Counter1Key, req)
val value = g.get(Counter1Key).value
case NotFound(Counter1Key, req) => // key counter1 does not exist
case NotFound(Counter1Key, req) // key counter1 does not exist
//#get-response1
case unexpected => fail("Unexpected response: " + unexpected)
case unexpected fail("Unexpected response: " + unexpected)
}
probe.expectMsgType[GetResponse[_]] match {
//#get-response2
case g @ GetSuccess(Set1Key, req) =>
case g @ GetSuccess(Set1Key, req)
val elements = g.get(Set1Key).elements
case GetFailure(Set1Key, req) =>
case GetFailure(Set1Key, req)
// read from 3 nodes failed within 1.second
case NotFound(Set1Key, req) => // key set1 does not exist
case NotFound(Set1Key, req) // key set1 does not exist
//#get-response2
case g @ GetSuccess(Set2Key, None) =>
case g @ GetSuccess(Set2Key, None)
val elements = g.get(Set2Key).elements
case unexpected => fail("Unexpected response: " + unexpected)
case unexpected fail("Unexpected response: " + unexpected)
}
}
@ -229,16 +229,16 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) {
val Counter1Key = PNCounterKey("counter1")
def receive: Receive = {
case "get-count" =>
case "get-count"
// incoming request to retrieve current value of the counter
replicator ! Get(Counter1Key, readTwo, request = Some(sender()))
case g @ GetSuccess(Counter1Key, Some(replyTo: ActorRef)) =>
case g @ GetSuccess(Counter1Key, Some(replyTo: ActorRef))
val value = g.get(Counter1Key).value.longValue
replyTo ! value
case GetFailure(Counter1Key, Some(replyTo: ActorRef)) =>
case GetFailure(Counter1Key, Some(replyTo: ActorRef))
replyTo ! -1L
case NotFound(Counter1Key, Some(replyTo: ActorRef)) =>
case NotFound(Counter1Key, Some(replyTo: ActorRef))
replyTo ! 0L
}
//#get-request-context
@ -258,9 +258,9 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) {
var currentValue = BigInt(0)
def receive: Receive = {
case c @ Changed(Counter1Key) =>
case c @ Changed(Counter1Key)
currentValue = c.get(Counter1Key).value
case "get-count" =>
case "get-count"
// incoming request to retrieve current value of the counter
sender() ! currentValue
}
@ -304,7 +304,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) {
val m2 = m1.decrement("a", 2)
val m3 = m2.increment("b", 1)
println(m3.get("a")) // 5
m3.entries.foreach { case (key, value) => println(s"$key -> $value") }
m3.entries.foreach { case (key, value) println(s"$key -> $value") }
//#pncountermap
}

View file

@ -45,18 +45,18 @@ class ShoppingCart(userId: String) extends Actor {
//#get-cart
def receiveGetCart: Receive = {
case GetCart =>
case GetCart
replicator ! Get(DataKey, readMajority, Some(sender()))
case g @ GetSuccess(DataKey, Some(replyTo: ActorRef)) =>
case g @ GetSuccess(DataKey, Some(replyTo: ActorRef))
val data = g.get(DataKey)
val cart = Cart(data.entries.values.toSet)
replyTo ! cart
case NotFound(DataKey, Some(replyTo: ActorRef)) =>
case NotFound(DataKey, Some(replyTo: ActorRef))
replyTo ! Cart(Set.empty)
case GetFailure(DataKey, Some(replyTo: ActorRef)) =>
case GetFailure(DataKey, Some(replyTo: ActorRef))
// ReadMajority failure, try again with local read
replicator ! Get(DataKey, ReadLocal, Some(replyTo))
}
@ -64,9 +64,9 @@ class ShoppingCart(userId: String) extends Actor {
//#add-item
def receiveAddItem: Receive = {
case cmd @ AddItem(item) =>
case cmd @ AddItem(item)
val update = Update(DataKey, LWWMap.empty[String, LineItem], writeMajority, Some(cmd)) {
cart => updateCart(cart, item)
cart updateCart(cart, item)
}
replicator ! update
}
@ -74,38 +74,38 @@ class ShoppingCart(userId: String) extends Actor {
def updateCart(data: LWWMap[String, LineItem], item: LineItem): LWWMap[String, LineItem] =
data.get(item.productId) match {
case Some(LineItem(_, _, existingQuantity)) =>
case Some(LineItem(_, _, existingQuantity))
data + (item.productId -> item.copy(quantity = existingQuantity + item.quantity))
case None => data + (item.productId -> item)
case None data + (item.productId -> item)
}
//#remove-item
def receiveRemoveItem: Receive = {
case cmd @ RemoveItem(productId) =>
case cmd @ RemoveItem(productId)
// Try to fetch latest from a majority of nodes first, since ORMap
// remove must have seen the item to be able to remove it.
replicator ! Get(DataKey, readMajority, Some(cmd))
case GetSuccess(DataKey, Some(RemoveItem(productId))) =>
case GetSuccess(DataKey, Some(RemoveItem(productId)))
replicator ! Update(DataKey, LWWMap(), writeMajority, None) {
_ - productId
}
case GetFailure(DataKey, Some(RemoveItem(productId))) =>
case GetFailure(DataKey, Some(RemoveItem(productId)))
// ReadMajority failed, fall back to best effort local value
replicator ! Update(DataKey, LWWMap(), writeMajority, None) {
_ - productId
}
case NotFound(DataKey, Some(RemoveItem(productId))) =>
case NotFound(DataKey, Some(RemoveItem(productId)))
// nothing to remove
}
//#remove-item
def receiveOther: Receive = {
case _: UpdateSuccess[_] | _: UpdateTimeout[_] =>
case _: UpdateSuccess[_] | _: UpdateTimeout[_]
// UpdateTimeout, will eventually be replicated
case e: UpdateFailure[_] => throw new IllegalStateException("Unexpected failure: " + e)
case e: UpdateFailure[_] throw new IllegalStateException("Unexpected failure: " + e)
}
}

View file

@ -22,8 +22,8 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem)
override def identifier = 99999
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
case m: TwoPhaseSet => twoPhaseSetToProto(m).toByteArray
case _ => throw new IllegalArgumentException(
case m: TwoPhaseSet twoPhaseSetToProto(m).toByteArray
case _ throw new IllegalArgumentException(
s"Can't serialize object of type ${obj.getClass}")
}
@ -53,8 +53,8 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem)
val msg = TwoPhaseSetMessages.TwoPhaseSet.parseFrom(bytes)
val addsSet = msg.getAddsList.iterator.asScala.toSet
val removalsSet = msg.getRemovalsList.iterator.asScala.toSet
val adds = addsSet.foldLeft(GSet.empty[String])((acc, el) => acc.add(el))
val removals = removalsSet.foldLeft(GSet.empty[String])((acc, el) => acc.add(el))
val adds = addsSet.foldLeft(GSet.empty[String])((acc, el) acc.add(el))
val removals = removalsSet.foldLeft(GSet.empty[String])((acc, el) acc.add(el))
// GSet will accumulate deltas when adding elements,
// but those are not of interest in the result of the deserialization
TwoPhaseSet(adds.resetDelta, removals.resetDelta)
@ -66,8 +66,8 @@ class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem)
extends TwoPhaseSetSerializer(system) {
//#compression
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
case m: TwoPhaseSet => compress(twoPhaseSetToProto(m))
case _ => throw new IllegalArgumentException(
case m: TwoPhaseSet compress(twoPhaseSetToProto(m))
case _ throw new IllegalArgumentException(
s"Can't serialize object of type ${obj.getClass}")
}

View file

@ -22,8 +22,8 @@ class TwoPhaseSetSerializer2(val system: ExtendedActorSystem)
val replicatedDataSerializer = new ReplicatedDataSerializer(system)
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
case m: TwoPhaseSet => twoPhaseSetToProto(m).toByteArray
case _ => throw new IllegalArgumentException(
case m: TwoPhaseSet twoPhaseSetToProto(m).toByteArray
case _ throw new IllegalArgumentException(
s"Can't serialize object of type ${obj.getClass}")
}

View file

@ -229,16 +229,16 @@ object DispatcherDocSpec {
// Create a new PriorityGenerator, lower prio means more important
PriorityGenerator {
// 'highpriority messages should be treated first if possible
case 'highpriority => 0
case 'highpriority 0
// 'lowpriority messages should be treated last if possible
case 'lowpriority => 2
case 'lowpriority 2
// PoisonPill when no other left
case PoisonPill => 3
case PoisonPill 3
// We default to 1, which is in between high and low
case otherwise => 1
case otherwise 1
})
//#prio-mailbox
@ -250,7 +250,7 @@ object DispatcherDocSpec {
class MyActor extends Actor {
def receive = {
case x =>
case x
}
}
@ -267,7 +267,7 @@ object DispatcherDocSpec {
with RequiresMessageQueue[MyUnboundedMessageQueueSemantics] {
//#require-mailbox-on-actor
def receive = {
case _ =>
case _
}
//#require-mailbox-on-actor
// ...
@ -370,7 +370,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) {
self ! PoisonPill
def receive = {
case x => log.info(x.toString)
case x log.info(x.toString)
}
}
val a = system.actorOf(Props(classOf[Logger], this).withDispatcher(
@ -389,7 +389,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) {
//#prio-dispatcher
watch(a)
expectMsgPF() { case Terminated(`a`) => () }
expectMsgPF() { case Terminated(`a`) () }
}
}
@ -407,7 +407,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) {
self ! PoisonPill
def receive = {
case x => log.info(x.toString)
case x log.info(x.toString)
}
}
val a = system.actorOf(Props(classOf[Logger], this).withDispatcher(
@ -422,7 +422,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) {
//#control-aware-dispatcher
watch(a)
expectMsgPF() { case Terminated(`a`) => () }
expectMsgPF() { case Terminated(`a`) () }
}
}

View file

@ -59,4 +59,4 @@ class MyUnboundedMailbox extends MailboxType
//#mailbox-marker-interface
// Marker trait used for mailbox requirements mapping
trait MyUnboundedMessageQueueSemantics
//#mailbox-marker-interface
//#mailbox-marker-interface

View file

@ -25,7 +25,7 @@ object EventBusDocSpec {
type Classifier = String
type Subscriber = ActorRef
// is used for extracting the classifier from the incoming events
// is used for extracting the classifier from the incoming events
override protected def classify(event: Event): Classifier = event.topic
// will be invoked for each event for all subscribers which registered themselves
@ -74,7 +74,7 @@ object EventBusDocSpec {
override protected val subclassification: Subclassification[Classifier] =
new StartsWithSubclassification
// is used for extracting the classifier from the incoming events
// is used for extracting the classifier from the incoming events
override protected def classify(event: Event): Classifier = event.topic
// will be invoked for each event for all subscribers which registered
@ -102,7 +102,7 @@ object EventBusDocSpec {
override protected def compareClassifiers(a: Classifier, b: Classifier): Int =
if (a < b) -1 else if (a == b) 0 else 1
// is needed for storing subscribers in an ordered collection
// is needed for storing subscribers in an ordered collection
override protected def compareSubscribers(a: Subscriber, b: Subscriber): Int =
a.compareTo(b)

View file

@ -21,8 +21,8 @@ object LoggingDocSpec {
reason.getMessage, message.getOrElse(""))
}
def receive = {
case "test" => log.info("Received test")
case x => log.warning("Received unknown message: {}", x)
case "test" log.info("Received test")
case x log.warning("Received unknown message: {}", x)
}
}
//#my-actor
@ -33,7 +33,7 @@ object LoggingDocSpec {
val log = Logging(this)
def receive = {
case _ => {
case _ {
//#mdc
val mdc = Map("requestId" -> 1234, "visitorId" -> 5678)
log.mdc(mdc)
@ -59,14 +59,14 @@ object LoggingDocSpec {
reqId += 1
val always = Map("requestId" -> reqId)
val perMessage = currentMessage match {
case r: Req => Map("visitorId" -> r.visitorId)
case _ => Map()
case r: Req Map("visitorId" -> r.visitorId)
case _ Map()
}
always ++ perMessage
}
def receive: Receive = {
case r: Req => {
case r: Req {
log.info(s"Starting new request: ${r.work}")
}
}
@ -84,11 +84,11 @@ object LoggingDocSpec {
class MyEventListener extends Actor {
def receive = {
case InitializeLogger(_) => sender() ! LoggerInitialized
case Error(cause, logSource, logClass, message) => // ...
case Warning(logSource, logClass, message) => // ...
case Info(logSource, logClass, message) => // ...
case Debug(logSource, logClass, message) => // ...
case InitializeLogger(_) sender() ! LoggerInitialized
case Error(cause, logSource, logClass, message) // ...
case Warning(logSource, logClass, message) // ...
case Info(logSource, logClass, message) // ...
case Debug(logSource, logClass, message) // ...
}
}
//#my-event-listener
@ -120,7 +120,7 @@ object LoggingDocSpec {
class DeadLetterListener extends Actor {
def receive = {
case d: DeadLetter => println(d)
case d: DeadLetter println(d)
}
}
//#deadletters
@ -132,8 +132,8 @@ object LoggingDocSpec {
class Listener extends Actor {
def receive = {
case m: Jazz => println(s"${self.path.name} is listening to: ${m.artist}")
case m: Electronic => println(s"${self.path.name} is listening to: ${m.artist}")
case m: Jazz println(s"${self.path.name} is listening to: ${m.artist}")
case m: Electronic println(s"${self.path.name} is listening to: ${m.artist}")
}
}
//#superclass-subscription-eventstream

View file

@ -60,7 +60,7 @@ object ExtensionDocSpec {
class MyActor extends Actor {
def receive = {
case someMessage =>
case someMessage
CountExtension(context.system).increment()
}
}
@ -68,12 +68,12 @@ object ExtensionDocSpec {
//#extension-usage-actor-trait
trait Counting { self: Actor =>
trait Counting { self: Actor
def increment() = CountExtension(context.system).increment()
}
class MyCounterActor extends Actor with Counting {
def receive = {
case someMessage => increment()
case someMessage increment()
}
}
//#extension-usage-actor-trait

View file

@ -66,7 +66,7 @@ object SettingsExtensionDocSpec {
//#extension-usage-actor
def receive = {
case someMessage =>
case someMessage
}
def connect(dbUri: String, circuitBreakerTimeout: Duration) = {

View file

@ -17,14 +17,14 @@ object MyActor {
class MyActor extends Actor {
import MyActor._
def receive = {
case message: Message => message match {
case BarMessage(bar) => sender() ! BazMessage("Got " + bar)
// warning here:
case message: Message message match {
case BarMessage(bar) sender() ! BazMessage("Got " + bar)
// warning here:
// "match may not be exhaustive. It would fail on the following input: FooMessage(_)"
//#exhaustiveness-check
case FooMessage(_) => // avoid the warning in our build logs
case FooMessage(_) // avoid the warning in our build logs
//#exhaustiveness-check
}
}
}
//#exhaustiveness-check
//#exhaustiveness-check

View file

@ -18,9 +18,9 @@ object FutureDocSpec {
class MyActor extends Actor {
def receive = {
case x: String => sender() ! x.toUpperCase
case x: Int if x < 0 => sender() ! Status.Failure(new ArithmeticException("Negative values not supported"))
case x: Int => sender() ! x
case x: String sender() ! x.toUpperCase
case x: Int if x < 0 sender() ! Status.Failure(new ArithmeticException("Negative values not supported"))
case x: Int sender() ! x
}
}
@ -29,7 +29,7 @@ object FutureDocSpec {
class OddActor extends Actor {
var n = 1
def receive = {
case GetNext =>
case GetNext
sender() ! n
n += 2
}
@ -40,7 +40,7 @@ class FutureDocSpec extends AkkaSpec {
import FutureDocSpec._
import system.dispatcher
val println: PartialFunction[Any, Unit] = { case _ => }
val println: PartialFunction[Any, Unit] = { case _ }
"demonstrate usage custom ExecutionContext" in {
val yourExecutorServiceGoesHere = java.util.concurrent.Executors.newSingleThreadExecutor()
@ -112,7 +112,7 @@ class FutureDocSpec extends AkkaSpec {
val f1 = Future {
"Hello" + "World"
}
val f2 = f1 map { x =>
val f2 = f1 map { x
x.length
}
f2 foreach println
@ -128,8 +128,8 @@ class FutureDocSpec extends AkkaSpec {
"Hello" + "World"
}
val f2 = Future.successful(3)
val f3 = f1 map { x =>
f2 map { y =>
val f3 = f1 map { x
f2 map { y
x.length * y
}
}
@ -144,8 +144,8 @@ class FutureDocSpec extends AkkaSpec {
"Hello" + "World"
}
val f2 = Future.successful(3)
val f3 = f1 flatMap { x =>
f2 map { y =>
val f3 = f1 flatMap { x
f2 map { y
x.length * y
}
}
@ -164,7 +164,7 @@ class FutureDocSpec extends AkkaSpec {
val failedFilter = future1.filter(_ % 2 == 1).recover {
// When filter fails, it will have a java.util.NoSuchElementException
case m: NoSuchElementException => 0
case m: NoSuchElementException 0
}
failedFilter foreach println
@ -178,9 +178,9 @@ class FutureDocSpec extends AkkaSpec {
"demonstrate usage of for comprehension" in {
//#for-comprehension
val f = for {
a <- Future(10 / 2) // 10 / 2 = 5
b <- Future(a + 1) // 5 + 1 = 6
c <- Future(a - 1) // 5 - 1 = 4
a Future(10 / 2) // 10 / 2 = 5
b Future(a + 1) // 5 + 1 = 6
c Future(a - 1) // 5 - 1 = 4
if c > 3 // Future.filter
} yield b * c // 6 * 4 = 24
@ -232,9 +232,9 @@ class FutureDocSpec extends AkkaSpec {
val f2 = ask(actor2, msg2)
val f3 = for {
a <- f1.mapTo[Int]
b <- f2.mapTo[Int]
c <- ask(actor3, (a + b)).mapTo[Int]
a f1.mapTo[Int]
b f2.mapTo[Int]
c ask(actor3, (a + b)).mapTo[Int]
} yield c
f3 foreach println
@ -262,7 +262,7 @@ class FutureDocSpec extends AkkaSpec {
"demonstrate usage of sequence" in {
//#sequence
val futureList = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1)))
val futureList = Future.sequence((1 to 100).toList.map(x Future(x * 2 - 1)))
val oddSum = futureList.map(_.sum)
oddSum foreach println
//#sequence
@ -271,7 +271,7 @@ class FutureDocSpec extends AkkaSpec {
"demonstrate usage of traverse" in {
//#traverse
val futureList = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1))
val futureList = Future.traverse((1 to 100).toList)(x Future(x * 2 - 1))
val oddSum = futureList.map(_.sum)
oddSum foreach println
//#traverse
@ -281,7 +281,7 @@ class FutureDocSpec extends AkkaSpec {
"demonstrate usage of fold" in {
//#fold
// Create a sequence of Futures
val futures = for (i <- 1 to 1000) yield Future(i * 2)
val futures = for (i 1 to 1000) yield Future(i * 2)
val futureSum = Future.fold(futures)(0)(_ + _)
futureSum foreach println
//#fold
@ -291,7 +291,7 @@ class FutureDocSpec extends AkkaSpec {
"demonstrate usage of reduce" in {
//#reduce
// Create a sequence of Futures
val futures = for (i <- 1 to 1000) yield Future(i * 2)
val futures = for (i 1 to 1000) yield Future(i * 2)
val futureSum = Future.reduce(futures)(_ + _)
futureSum foreach println
//#reduce
@ -304,7 +304,7 @@ class FutureDocSpec extends AkkaSpec {
val msg1 = -1
//#recover
val future = akka.pattern.ask(actor, msg1) recover {
case e: ArithmeticException => 0
case e: ArithmeticException 0
}
future foreach println
//#recover
@ -317,8 +317,8 @@ class FutureDocSpec extends AkkaSpec {
val msg1 = -1
//#try-recover
val future = akka.pattern.ask(actor, msg1) recoverWith {
case e: ArithmeticException => Future.successful(0)
case foo: IllegalArgumentException =>
case e: ArithmeticException Future.successful(0)
case foo: IllegalArgumentException
Future.failed[Int](new IllegalStateException("All br0ken!"))
}
future foreach println
@ -330,7 +330,7 @@ class FutureDocSpec extends AkkaSpec {
val future1 = Future { "foo" }
val future2 = Future { "bar" }
//#zip
val future3 = future1 zip future2 map { case (a, b) => a + " " + b }
val future3 = future1 zip future2 map { case (a, b) a + " " + b }
future3 foreach println
//#zip
Await.result(future3, 3 seconds) should be("foo bar")
@ -343,9 +343,9 @@ class FutureDocSpec extends AkkaSpec {
def watchSomeTV(): Unit = ()
//#and-then
val result = Future { loadPage(url) } andThen {
case Failure(exception) => log(exception)
case Failure(exception) log(exception)
} andThen {
case _ => watchSomeTV()
case _ watchSomeTV()
}
result foreach println
//#and-then
@ -368,8 +368,8 @@ class FutureDocSpec extends AkkaSpec {
val future = Future { "foo" }
//#onSuccess
future onSuccess {
case "bar" => println("Got my bar alright!")
case x: String => println("Got some random string: " + x)
case "bar" println("Got my bar alright!")
case x: String println("Got some random string: " + x)
}
//#onSuccess
Await.result(future, 3 seconds) should be("foo")
@ -378,9 +378,9 @@ class FutureDocSpec extends AkkaSpec {
val future = Future.failed[String](new IllegalStateException("OHNOES"))
//#onFailure
future onFailure {
case ise: IllegalStateException if ise.getMessage == "OHNOES" =>
case ise: IllegalStateException if ise.getMessage == "OHNOES"
//OHNOES! We are in deep trouble, do something!
case e: Exception =>
case e: Exception
//Do something else
}
//#onFailure
@ -391,8 +391,8 @@ class FutureDocSpec extends AkkaSpec {
def doSomethingOnFailure(t: Throwable) = ()
//#onComplete
future onComplete {
case Success(result) => doSomethingOnSuccess(result)
case Failure(failure) => doSomethingOnFailure(failure)
case Success(result) doSomethingOnSuccess(result)
case Failure(failure) doSomethingOnFailure(failure)
}
//#onComplete
Await.result(future, 3 seconds) should be("foo")
@ -436,7 +436,7 @@ class FutureDocSpec extends AkkaSpec {
val f = Future("hello")
def receive = {
//#receive-omitted
case _ =>
case _
//#receive-omitted
}
}

View file

@ -53,15 +53,15 @@ class EchoManager(handlerClass: Class[_]) extends Actor with ActorLogging {
override def postRestart(thr: Throwable): Unit = context stop self
def receive = {
case Bound(localAddress) =>
case Bound(localAddress)
log.info("listening on port {}", localAddress.getPort)
case CommandFailed(Bind(_, local, _, _, _)) =>
case CommandFailed(Bind(_, local, _, _, _))
log.warning(s"cannot bind to [$local]")
context stop self
//#echo-manager
case Connected(remote, local) =>
case Connected(remote, local)
log.info("received connection from {}", remote)
val handler = context.actorOf(Props(handlerClass, sender(), remote))
sender() ! Register(handler, keepOpenOnPeerClosed = true)
@ -92,18 +92,18 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress)
//#writing
def writing: Receive = {
case Received(data) =>
case Received(data)
connection ! Write(data, Ack(currentOffset))
buffer(data)
case Ack(ack) =>
case Ack(ack)
acknowledge(ack)
case CommandFailed(Write(_, Ack(ack))) =>
case CommandFailed(Write(_, Ack(ack)))
connection ! ResumeWriting
context become buffering(ack)
case PeerClosed =>
case PeerClosed
if (storage.isEmpty) context stop self
else context become closing
}
@ -115,11 +115,11 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress)
var peerClosed = false
{
case Received(data) => buffer(data)
case WritingResumed => writeFirst()
case PeerClosed => peerClosed = true
case Ack(ack) if ack < nack => acknowledge(ack)
case Ack(ack) =>
case Received(data) buffer(data)
case WritingResumed writeFirst()
case PeerClosed peerClosed = true
case Ack(ack) if ack < nack acknowledge(ack)
case Ack(ack)
acknowledge(ack)
if (storage.nonEmpty) {
if (toAck > 0) {
@ -139,19 +139,19 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress)
//#closing
def closing: Receive = {
case CommandFailed(_: Write) =>
case CommandFailed(_: Write)
connection ! ResumeWriting
context.become({
case WritingResumed =>
case WritingResumed
writeAll()
context.unbecome()
case ack: Int => acknowledge(ack)
case ack: Int acknowledge(ack)
}, discardOld = false)
case Ack(ack) =>
case Ack(ack)
acknowledge(ack)
if (storage.isEmpty) context stop self
}
@ -214,7 +214,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress)
}
private def writeAll(): Unit = {
for ((data, i) <- storage.zipWithIndex) {
for ((data, i) storage.zipWithIndex) {
connection ! Write(data, Ack(storageOffset + i))
}
}
@ -235,17 +235,17 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress)
case object Ack extends Event
def receive = {
case Received(data) =>
case Received(data)
buffer(data)
connection ! Write(data, Ack)
context.become({
case Received(data) => buffer(data)
case Ack => acknowledge()
case PeerClosed => closing = true
case Received(data) buffer(data)
case Ack acknowledge()
case PeerClosed closing = true
}, discardOld = false)
case PeerClosed => context stop self
case PeerClosed context stop self
}
//#storage-omitted

View file

@ -34,14 +34,14 @@ class Server extends Actor {
IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 0))
def receive = {
case b @ Bound(localAddress) =>
case b @ Bound(localAddress)
//#do-some-logging-or-setup
context.parent ! b
//#do-some-logging-or-setup
case CommandFailed(_: Bind) => context stop self
case CommandFailed(_: Bind) context stop self
case c @ Connected(remote, local) =>
case c @ Connected(remote, local)
//#server
context.parent ! c
//#server
@ -57,8 +57,8 @@ class Server extends Actor {
class SimplisticHandler extends Actor {
import Tcp._
def receive = {
case Received(data) => sender() ! Write(data)
case PeerClosed => context stop self
case Received(data) sender() ! Write(data)
case PeerClosed context stop self
}
}
//#simplistic-handler
@ -77,25 +77,25 @@ class Client(remote: InetSocketAddress, listener: ActorRef) extends Actor {
IO(Tcp) ! Connect(remote)
def receive = {
case CommandFailed(_: Connect) =>
case CommandFailed(_: Connect)
listener ! "connect failed"
context stop self
case c @ Connected(remote, local) =>
case c @ Connected(remote, local)
listener ! c
val connection = sender()
connection ! Register(self)
context become {
case data: ByteString =>
case data: ByteString
connection ! Write(data)
case CommandFailed(w: Write) =>
case CommandFailed(w: Write)
// O/S buffer was full
listener ! "write failed"
case Received(data) =>
case Received(data)
listener ! data
case "close" =>
case "close"
connection ! Close
case _: ConnectionClosed =>
case _: ConnectionClosed
listener ! "connection closed"
context stop self
}
@ -108,7 +108,7 @@ class IODocSpec extends AkkaSpec {
class Parent extends Actor {
context.actorOf(Props[Server], "server")
def receive = {
case msg => testActor forward msg
case msg testActor forward msg
}
}

View file

@ -26,7 +26,7 @@ object PullReadingExample {
def receive = {
//#pull-accepting
case Bound(localAddress) =>
case Bound(localAddress)
// Accept connections one by one
sender() ! ResumeAccepting(batchSize = 1)
context.become(listening(sender()))
@ -36,7 +36,7 @@ object PullReadingExample {
//#pull-accepting-cont
def listening(listener: ActorRef): Receive = {
case Connected(remote, local) =>
case Connected(remote, local)
val handler = context.actorOf(Props(classOf[PullEcho], sender()))
sender() ! Register(handler, keepOpenOnPeerClosed = true)
listener ! ResumeAccepting(batchSize = 1)
@ -53,8 +53,8 @@ object PullReadingExample {
override def preStart: Unit = connection ! ResumeReading
def receive = {
case Received(data) => connection ! Write(data, Ack)
case Ack => connection ! ResumeReading
case Received(data) connection ! Write(data, Ack)
case Ack connection ! ResumeReading
}
//#pull-reading-echo
}

View file

@ -38,10 +38,10 @@ class Listener(iface: String, group: String, port: Int, sink: ActorRef) extends
//#bind
def receive = {
case b @ Udp.Bound(to) =>
case b @ Udp.Bound(to)
log.info("Bound to {}", to)
sink ! (b)
case Udp.Received(data, remote) =>
case Udp.Received(data, remote)
val msg = data.decodeString("utf-8")
log.info("Received '{}' from {}", msg, remote)
sink ! msg
@ -53,7 +53,7 @@ class Sender(iface: String, group: String, port: Int, msg: String) extends Actor
IO(Udp) ! Udp.SimpleSender(List(Inet6ProtocolFamily()))
def receive = {
case Udp.SimpleSenderReady => {
case Udp.SimpleSenderReady {
val remote = new InetSocketAddress(s"$group%$iface", port)
log.info("Sending message to {}", remote)
sender() ! Udp.Send(ByteString(msg), remote)

View file

@ -20,7 +20,7 @@ class ScalaUdpMulticastSpec extends TestKit(ActorSystem("ScalaUdpMulticastSpec")
"listener" should {
"send message back to sink" in {
val ipv6ifaces =
NetworkInterface.getNetworkInterfaces.toSeq.filter(iface =>
NetworkInterface.getNetworkInterfaces.toSeq.filter(iface
iface.supportsMulticast &&
iface.isUp &&
iface.getInetAddresses.exists(_.isInstanceOf[Inet6Address]))
@ -33,7 +33,7 @@ class ScalaUdpMulticastSpec extends TestKit(ActorSystem("ScalaUdpMulticastSpec")
// on the platform (awsdl0 can't be used on OSX, docker[0-9] can't be used in a docker machine etc.)
// therefore: try hard to find an interface that _does_ work, and only fail if there was any potentially
// working interfaces but all failed
ipv6ifaces.exists { ipv6iface =>
ipv6ifaces.exists { ipv6iface
// host assigned link local multicast address http://tools.ietf.org/html/rfc3307#section-4.3.2
// generate a random 32 bit multicast address with the high order bit set
val randomAddress: String = (Random.nextInt().abs.toLong | (1L << 31)).toHexString.toUpperCase
@ -51,7 +51,7 @@ class ScalaUdpMulticastSpec extends TestKit(ActorSystem("ScalaUdpMulticastSpec")
true
} catch {
case _: AssertionError =>
case _: AssertionError
system.log.info("Failed to run test on interface {}", ipv6iface.getDisplayName)
false

View file

@ -21,7 +21,7 @@ object ScalaUdpDocSpec {
IO(Udp) ! Udp.SimpleSender
def receive = {
case Udp.SimpleSenderReady =>
case Udp.SimpleSenderReady
context.become(ready(sender()))
//#sender
sender() ! Udp.Send(ByteString("hello"), remote)
@ -29,7 +29,7 @@ object ScalaUdpDocSpec {
}
def ready(send: ActorRef): Receive = {
case msg: String =>
case msg: String
send ! Udp.Send(ByteString(msg), remote)
//#sender
if (msg == "world") send ! PoisonPill
@ -44,7 +44,7 @@ object ScalaUdpDocSpec {
IO(Udp) ! Udp.Bind(self, new InetSocketAddress("localhost", 0))
def receive = {
case Udp.Bound(local) =>
case Udp.Bound(local)
//#listener
nextActor forward local
//#listener
@ -52,15 +52,15 @@ object ScalaUdpDocSpec {
}
def ready(socket: ActorRef): Receive = {
case Udp.Received(data, remote) =>
case Udp.Received(data, remote)
val processed = // parse data etc., e.g. using PipelineStage
//#listener
data.utf8String
//#listener
socket ! Udp.Send(data, remote) // example server echoes back
nextActor ! processed
case Udp.Unbind => socket ! Udp.Unbind
case Udp.Unbound => context.stop(self)
case Udp.Unbind socket ! Udp.Unbind
case Udp.Unbound context.stop(self)
}
}
//#listener
@ -71,7 +71,7 @@ object ScalaUdpDocSpec {
IO(UdpConnected) ! UdpConnected.Connect(self, remote)
def receive = {
case UdpConnected.Connected =>
case UdpConnected.Connected
context.become(ready(sender()))
//#connected
sender() ! UdpConnected.Send(ByteString("hello"))
@ -79,17 +79,17 @@ object ScalaUdpDocSpec {
}
def ready(connection: ActorRef): Receive = {
case UdpConnected.Received(data) =>
case UdpConnected.Received(data)
// process data, send it on, etc.
//#connected
if (data.utf8String == "hello")
connection ! UdpConnected.Send(ByteString("world"))
//#connected
case msg: String =>
case msg: String
connection ! UdpConnected.Send(ByteString(msg))
case UdpConnected.Disconnect =>
case UdpConnected.Disconnect
connection ! UdpConnected.Disconnect
case UdpConnected.Disconnected => context.stop(self)
case UdpConnected.Disconnected context.stop(self)
}
}
//#connected

View file

@ -59,13 +59,13 @@ class BackoffSupervisorDocSpec {
//#backoff-custom-stop
val supervisor = BackoffSupervisor.props(
Backoff.onStop(
childProps,
childName = "myEcho",
minBackoff = 3.seconds,
maxBackoff = 30.seconds,
randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
).withManualReset // the child must send BackoffSupervisor.Reset to its parent
.withDefaultStoppingStrategy // Stop at any Exception thrown
childProps,
childName = "myEcho",
minBackoff = 3.seconds,
maxBackoff = 30.seconds,
randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
).withManualReset // the child must send BackoffSupervisor.Reset to its parent
.withDefaultStoppingStrategy // Stop at any Exception thrown
)
//#backoff-custom-stop
@ -89,8 +89,8 @@ class BackoffSupervisorDocSpec {
).withAutoReset(10.seconds) // reset if the child does not throw any errors within 10 seconds
.withSupervisorStrategy(
OneForOneStrategy() {
case _: MyException => SupervisorStrategy.Restart
case _ => SupervisorStrategy.Escalate
case _: MyException SupervisorStrategy.Restart
case _ SupervisorStrategy.Escalate
}))
//#backoff-custom-fail

View file

@ -48,14 +48,14 @@ object PersistenceDocSpec {
//#recovery-completed
override def receiveRecover: Receive = {
case RecoveryCompleted =>
case RecoveryCompleted
// perform init after recovery, before any other messages
//...
case evt => //...
case evt //...
}
override def receiveCommand: Receive = {
case msg => //...
case msg //...
}
//#recovery-completed
}
@ -84,10 +84,10 @@ object PersistenceDocSpec {
//#persistence-id-override
override def receiveRecover: Receive = {
case _ =>
case _
}
override def receiveCommand: Receive = {
case _ =>
case _
}
}
}
@ -128,25 +128,25 @@ object PersistenceDocSpec {
override def persistenceId: String = "persistence-id"
override def receiveCommand: Receive = {
case s: String => persist(MsgSent(s))(updateState)
case Confirm(deliveryId) => persist(MsgConfirmed(deliveryId))(updateState)
case s: String persist(MsgSent(s))(updateState)
case Confirm(deliveryId) persist(MsgConfirmed(deliveryId))(updateState)
}
override def receiveRecover: Receive = {
case evt: Evt => updateState(evt)
case evt: Evt updateState(evt)
}
def updateState(evt: Evt): Unit = evt match {
case MsgSent(s) =>
deliver(destination)(deliveryId => Msg(deliveryId, s))
case MsgSent(s)
deliver(destination)(deliveryId Msg(deliveryId, s))
case MsgConfirmed(deliveryId) => confirmDelivery(deliveryId)
case MsgConfirmed(deliveryId) confirmDelivery(deliveryId)
}
}
class MyDestination extends Actor {
def receive = {
case Msg(deliveryId, s) =>
case Msg(deliveryId, s)
// ...
sender() ! Confirm(deliveryId)
}
@ -166,10 +166,10 @@ object PersistenceDocSpec {
val snapShotInterval = 1000
override def receiveCommand: Receive = {
case SaveSnapshotSuccess(metadata) => // ...
case SaveSnapshotFailure(metadata, reason) => // ...
case cmd: String =>
persist(s"evt-$cmd") { e =>
case SaveSnapshotSuccess(metadata) // ...
case SaveSnapshotFailure(metadata, reason) // ...
case cmd: String
persist(s"evt-$cmd") { e
updateState(e)
if (lastSequenceNr % snapShotInterval == 0 && lastSequenceNr != 0)
saveSnapshot(state)
@ -195,9 +195,9 @@ object PersistenceDocSpec {
var state: Any = _
override def receiveRecover: Receive = {
case SnapshotOffer(metadata, offeredSnapshot) => state = offeredSnapshot
case RecoveryCompleted =>
case event => // ...
case SnapshotOffer(metadata, offeredSnapshot) state = offeredSnapshot
case RecoveryCompleted
case event // ...
}
//#snapshot-offer
@ -214,14 +214,14 @@ object PersistenceDocSpec {
override def persistenceId = "my-stable-persistence-id"
override def receiveRecover: Receive = {
case _ => // handle recovery here
case _ // handle recovery here
}
override def receiveCommand: Receive = {
case c: String => {
case c: String {
sender() ! c
persistAsync(s"evt-$c-1") { e => sender() ! e }
persistAsync(s"evt-$c-2") { e => sender() ! e }
persistAsync(s"evt-$c-1") { e sender() ! e }
persistAsync(s"evt-$c-2") { e sender() ! e }
}
}
}
@ -249,15 +249,15 @@ object PersistenceDocSpec {
override def persistenceId = "my-stable-persistence-id"
override def receiveRecover: Receive = {
case _ => // handle recovery here
case _ // handle recovery here
}
override def receiveCommand: Receive = {
case c: String => {
case c: String {
sender() ! c
persistAsync(s"evt-$c-1") { e => sender() ! e }
persistAsync(s"evt-$c-2") { e => sender() ! e }
deferAsync(s"evt-$c-3") { e => sender() ! e }
persistAsync(s"evt-$c-1") { e sender() ! e }
persistAsync(s"evt-$c-2") { e sender() ! e }
deferAsync(s"evt-$c-3") { e sender() ! e }
}
}
}
@ -287,15 +287,15 @@ object PersistenceDocSpec {
override def persistenceId = "my-stable-persistence-id"
override def receiveRecover: Receive = {
case _ => // handle recovery here
case _ // handle recovery here
}
override def receiveCommand: Receive = {
case c: String => {
case c: String {
sender() ! c
persist(s"evt-$c-1") { e => sender() ! e }
persist(s"evt-$c-2") { e => sender() ! e }
deferAsync(s"evt-$c-3") { e => sender() ! e }
persist(s"evt-$c-1") { e sender() ! e }
persist(s"evt-$c-2") { e sender() ! e }
deferAsync(s"evt-$c-3") { e sender() ! e }
}
}
}
@ -308,24 +308,24 @@ object PersistenceDocSpec {
override def persistenceId = "my-stable-persistence-id"
override def receiveRecover: Receive = {
case _ => // handle recovery here
case _ // handle recovery here
}
//#nested-persist-persist
override def receiveCommand: Receive = {
case c: String =>
case c: String
sender() ! c
persist(s"$c-1-outer") { outer1 =>
persist(s"$c-1-outer") { outer1
sender() ! outer1
persist(s"$c-1-inner") { inner1 =>
persist(s"$c-1-inner") { inner1
sender() ! inner1
}
}
persist(s"$c-2-outer") { outer2 =>
persist(s"$c-2-outer") { outer2
sender() ! outer2
persist(s"$c-2-inner") { inner2 =>
persist(s"$c-2-inner") { inner2
sender() ! inner2
}
}
@ -356,20 +356,20 @@ object PersistenceDocSpec {
override def persistenceId = "my-stable-persistence-id"
override def receiveRecover: Receive = {
case _ => // handle recovery here
case _ // handle recovery here
}
//#nested-persistAsync-persistAsync
override def receiveCommand: Receive = {
case c: String =>
case c: String
sender() ! c
persistAsync(c + "-outer-1") { outer =>
persistAsync(c + "-outer-1") { outer
sender() ! outer
persistAsync(c + "-inner-1") { inner => sender() ! inner }
persistAsync(c + "-inner-1") { inner sender() ! inner }
}
persistAsync(c + "-outer-2") { outer =>
persistAsync(c + "-outer-2") { outer
sender() ! outer
persistAsync(c + "-inner-2") { inner => sender() ! inner }
persistAsync(c + "-inner-2") { inner sender() ! inner }
}
}
//#nested-persistAsync-persistAsync
@ -408,15 +408,15 @@ object PersistenceDocSpec {
override def persistenceId = "safe-actor"
override def receiveCommand: Receive = {
case c: String =>
case c: String
println(c)
persist(s"handle-$c") { println(_) }
case Shutdown =>
case Shutdown
context.stop(self)
}
override def receiveRecover: Receive = {
case _ => // handle recovery here
case _ // handle recovery here
}
}
//#safe-shutdown

Some files were not shown because too many files have changed in this diff Show more