=doc #17329 Fixed and normalized spellings in ScalaDoc and comments

Andrey Myatlyuk 2015-06-02 21:01:00 -07:00
parent 10f039f70d
commit bc791eb86c
77 changed files with 100 additions and 100 deletions


@@ -137,7 +137,7 @@ public abstract class AbstractBoundedNodeQueue<T> {
  */
 public final int size() {
 //Order of operations is extremely important here
-// If no item was dequeued between when we looked at the count of the enqueueing end,
+// If no item was dequeued between when we looked at the count of the enqueuing end,
 // there should be no out-of-bounds
 for(;;) {
 final int deqCountBefore = getDeq().count;


@@ -465,7 +465,7 @@ private[akka] trait MinimalActorRef extends InternalActorRef with LocalRef {
 protected def writeReplace(): AnyRef = SerializedActorRef(this)
 }
-/** Subscribe to this class to be notified about all DeadLetters (also the supressed ones). */
+/** Subscribe to this class to be notified about all DeadLetters (also the suppressed ones). */
 sealed trait AllDeadLetters {
 def message: Any
 def sender: ActorRef


@@ -133,7 +133,7 @@ object ActorSystem {
 * falls back to the ClassLoader associated with the ActorSystem class.
 * If an ExecutionContext is given, it will be used as the default executor inside this ActorSystem.
 * If no ExecutionContext is given, the system will fallback to the executor configured under "akka.actor.default-dispatcher.default-executor.fallback".
-* The system will use the passed in config, or falls back to the deafult reference configuration using the ClassLoader.
+* The system will use the passed in config, or falls back to the default reference configuration using the ClassLoader.
 *
 * @see <a href="http://typesafehub.github.io/config/v1.3.0/" target="_blank">The Typesafe Config Library API Documentation</a>
 */
@@ -828,7 +828,7 @@ private[akka] class ActorSystemImpl(
 private[this] final val done = Promise[T]()
 private[this] final val ref = new AtomicReference(done)
-// onComplete never fires twice so safe to avoid nullcheck
+// onComplete never fires twice so safe to avoid null check
 upStreamTerminated onComplete { t ⇒ ref.getAndSet(null).complete(t) }
 /**


@@ -212,7 +212,7 @@ object FSM {
 final case class Event[D](event: Any, stateData: D) extends NoSerializationVerificationNeeded
 /**
-* Case class representing the state of the [[akka.actor.FSM]] whithin the
+* Case class representing the state of the [[akka.actor.FSM]] within the
 * `onTermination` block.
 */
 final case class StopEvent[S, D](reason: Reason, currentState: S, stateData: D) extends NoSerializationVerificationNeeded


@@ -213,7 +213,7 @@ class LightArrayRevolverScheduler(config: Config,
 try {
 ((d + TickDuration - oneNs) / TickDuration).toLong * TickDuration
 } catch {
-case _: IllegalArgumentException ⇒ d // rouding up Long.MaxValue.nanos overflows
+case _: IllegalArgumentException ⇒ d // rounding up Long.MaxValue.nanos overflows
 }
 /**


@@ -59,7 +59,7 @@ trait TypedActorFactory {
 }
 /**
-* Returns wether the supplied AnyRef is a TypedActor proxy or not
+* Returns whether the supplied AnyRef is a TypedActor proxy or not
 */
 def isTypedActor(proxyOrNot: AnyRef): Boolean
@@ -654,7 +654,7 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac
 }
 /**
-* Returns wether the supplied AnyRef is a TypedActor proxy or not
+* Returns whether the supplied AnyRef is a TypedActor proxy or not
 */
 def isTypedActor(proxyOrNot: AnyRef): Boolean = invocationHandlerFor(proxyOrNot) ne null
@@ -675,7 +675,7 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac
 proxy
 } else {
 proxyVar set proxy // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive
-actorVar set actorRef //Make sure the InvocationHandler gets ahold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet
+actorVar set actorRef //Make sure the InvocationHandler gets a hold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet
 proxyVar.get
 }
 }


@@ -59,7 +59,7 @@ private[akka] trait BatchingExecutor extends Executor {
 protected final def resubmitUnbatched(): Boolean = {
 val current = _tasksLocal.get()
 _tasksLocal.remove()
-if ((current eq this) && !current.isEmpty) { // Resubmit outselves if something bad happened and we still have work to do
+if ((current eq this) && !current.isEmpty) { // Resubmit ourselves if something bad happened and we still have work to do
 unbatchedExecute(current) //TODO what if this submission fails?
 true
 } else false


@@ -88,7 +88,7 @@ object Futures {
 *
 * The result becomes available once the asynchronous computation is completed.
 *
-* @param body the asychronous computation
+* @param body the asynchronous computation
 * @param executor the execution context on which the future is run
 * @return the `Future` holding the result of the computation
 */


@@ -126,7 +126,7 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB
 *
 * The actual check if the subscriber still has subscriptions is performed by the `EventStreamUnsubscriber`,
 * because it's an expensive operation, and we don want to block client-code for that long, the Actor will eventually
-* catch up and perform the apropriate operation.
+* catch up and perform the appropriate operation.
 */
 @tailrec
 private def unregisterIfNoMoreSubscribedChannels(subscriber: ActorRef): Unit = {


@@ -224,7 +224,7 @@ trait LoggingBus extends ActorEventBus {
 * }
 *
 * class MyClass extends MyType {
-* val sys = ActorSyste("sys")
+* val sys = ActorSystem("sys")
 * val log = Logging(sys, this) // will use "hallo,akka://sys" as logSource
 * def name = "hallo"
 * }


@@ -52,7 +52,7 @@ private[io] trait ChannelRegistry {
 /**
 * Implementations of this interface are sent as actor messages back to a channel actor as
 * a result of it having called `register` on the `ChannelRegistry`.
-* Enables a channel actor to directly schedule interest setting tasks to the selector mgmt. dispatcher.
+* Enables a channel actor to directly schedule interest setting tasks to the selector management dispatcher.
 */
 private[io] trait ChannelRegistration extends NoSerializationVerificationNeeded {
 def enableInterest(op: Int)


@@ -26,7 +26,7 @@ import java.lang.{ Iterable ⇒ JIterable }
 * In order to open an outbound connection send a [[Tcp.Connect]] message
 * to the [[TcpExt#manager]].
 *
-* In order to start listening for inbound connetions send a [[Tcp.Bind]]
+* In order to start listening for inbound connections send a [[Tcp.Bind]]
 * message to the [[TcpExt#manager]].
 *
 * The Java API for generating TCP commands is available at [[TcpMessage]].


@@ -49,11 +49,11 @@ private[io] class TcpManager(tcp: TcpExt)
 def receive = workerForCommandHandler {
 case c: Connect ⇒
-val commander = sender() // cache because we create a function that will run asyncly
+val commander = sender() // cache because we create a function that will run asynchly
 (registry ⇒ Props(classOf[TcpOutgoingConnection], tcp, registry, commander, c))
 case b: Bind ⇒
-val commander = sender() // cache because we create a function that will run asyncly
+val commander = sender() // cache because we create a function that will run asynchly
 (registry ⇒ Props(classOf[TcpListener], selectorPool, tcp, registry, commander, b))
 }


@@ -14,7 +14,7 @@ private[io] class UdpConnectedManager(udpConn: UdpConnectedExt)
 def receive = workerForCommandHandler {
 case c: Connect ⇒
-val commander = sender() // cache because we create a function that will run asyncly
+val commander = sender() // cache because we create a function that will run asynchly
 registry ⇒ Props(classOf[UdpConnection], udpConn, registry, commander, c)
 }


@@ -47,11 +47,11 @@ private[io] class UdpManager(udp: UdpExt) extends SelectionHandler.SelectorBased
 def receive = workerForCommandHandler {
 case b: Bind ⇒
-val commander = sender() // cache because we create a function that will run asyncly
+val commander = sender() // cache because we create a function that will run asynchly
 (registry ⇒ Props(classOf[UdpListener], udp, registry, commander, b))
 case SimpleSender(options) ⇒
-val commander = sender() // cache because we create a function that will run asyncly
+val commander = sender() // cache because we create a function that will run asynchly
 (registry ⇒ Props(classOf[UdpSender], udp, registry, commander, options))
 }


@@ -126,7 +126,7 @@ final case class BalancingPool(
 }
 /**
-* Uses the supervisor strategy of the given Routerconfig
+* Uses the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one.
 */
 override def withFallback(other: RouterConfig): RouterConfig =


@@ -97,7 +97,7 @@ final case class BroadcastPool(
 def withDispatcher(dispatcherId: String): BroadcastPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 */


@@ -314,7 +314,7 @@ final case class ConsistentHashingPool(
 copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 * Uses the the `hashMapping` defined in code, since that can't be defined in configuration.


@@ -98,7 +98,7 @@ final case class RandomPool(
 def withDispatcher(dispatcherId: String): RandomPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 */


@@ -49,7 +49,7 @@ trait Resizer {
 * returns true and no other resize is in progress.
 *
 * Return the number of routees to add or remove. Negative value will remove that number of routees.
-* Positive value will add that number of routess. 0 will not change the routees.
+* Positive value will add that number of routees. 0 will not change the routees.
 *
 * This method is invoked only in the context of the Router actor.
 */


@@ -102,7 +102,7 @@ final case class RoundRobinPool(
 def withDispatcher(dispatcherId: String): RoundRobinPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 */


@@ -119,7 +119,7 @@ private[akka] class RoutedActorCell(
 }
 /**
-* Called when `router` is initalized but before `super.start()` to
+* Called when `router` is initialized but before `super.start()` to
 * be able to do extra initialization in subclass.
 */
 protected def preSuperStart(): Unit = ()


@@ -54,7 +54,7 @@ trait RouterConfig extends Serializable {
 * Possibility to define an actor for controlling the routing
 * logic from external stimuli (e.g. monitoring metrics).
 * This actor will be a child of the router "head" actor.
-* Managment messages not handled by the "head" actor are
+* Management messages not handled by the "head" actor are
 * delegated to this controller actor.
 */
 def routingLogicController(routingLogic: RoutingLogic): Option[Props] = None


@@ -136,7 +136,7 @@ final case class ScatterGatherFirstCompletedPool(
 def withDispatcher(dispatcherId: String): ScatterGatherFirstCompletedPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 */


@@ -213,7 +213,7 @@ final case class SmallestMailboxPool(
 def withDispatcher(dispatcherId: String): SmallestMailboxPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 */


@@ -83,7 +83,7 @@ private[akka] final case class TailChoppingRoutees(
 }
 /**
-* A router poll thats sends the message to a first, random picked, routee,
+* A router poll that sends the message to a first, random picked, routee,
 * then wait a specified `interval` and then send to a second, random picked, and so on till one full cycle..
 *
 * The configuration parameter trumps the constructor arguments. This means that
@@ -169,7 +169,7 @@ final case class TailChoppingPool(
 def withDispatcher(dispatcherId: String): TailChoppingPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the resizer and/or the supervisor strategy of the given Routerconfig
+* Uses the resizer and/or the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
 * resizer was not defined in config.
 */


@@ -138,7 +138,7 @@ class Switch(startAsOn: Boolean = false) {
 def isOn: Boolean = switch.get
 /**
-* Returns whether the switch is IMMEDDIATELY off (no locking)
+* Returns whether the switch is IMMEDIATELY off (no locking)
 */
 def isOff: Boolean = !isOn
 }


@@ -103,7 +103,7 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess
 (s: String, r: RouteDefinition) ⇒ conversions.get(s).fold(r)(r.convertBodyTo)
 }
 /**
-* Configured setting, determine the class used to load/retrive the instance of the Camel Context
+* Configured setting, determine the class used to load/retrieve the instance of the Camel Context
 */
 final val ContextProvider: ContextProvider = {
 val fqcn = config.getString("akka.camel.context-provider")


@@ -163,7 +163,7 @@ final case class AdaptiveLoadBalancingPool(
 def withDispatcher(dispatcherId: String): AdaptiveLoadBalancingPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the supervisor strategy of the given Routerconfig
+* Uses the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one
 */
 override def withFallback(other: RouterConfig): RouterConfig =


@@ -81,7 +81,7 @@ private[metrics] object MetricsCollector {
 * Loads JVM and system metrics through JMX monitoring beans.
 *
 * @param address The [[akka.actor.Address]] of the node being sampled
-* @param decay how quickly the exponential weighting of past data is decayed
+* @param decayFactor how quickly the exponential weighting of past data is decayed
 */
 class JmxMetricsCollector(address: Address, decayFactor: Double) extends MetricsCollector {
 import StandardMetrics._
@@ -184,7 +184,7 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics
 * to missing classes or native libraries.
 *
 * @param address The [[akka.actor.Address]] of the node being sampled
-* @param decay how quickly the exponential weighting of past data is decayed
+* @param decayFactor how quickly the exponential weighting of past data is decayed
 * @param sigar the org.hyperic.Sigar instance
 */
 class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarProxy)


@@ -399,7 +399,7 @@ private[akka] object ClusterShardingGuardian {
 }
 /**
-* INTERNAL API. [[ShardRegion]] and [[ShardCoordinator]] actors are createad as children
+* INTERNAL API. [[ShardRegion]] and [[ShardCoordinator]] actors are created as children
 * of this actor.
 */
 private[akka] class ClusterShardingGuardian extends Actor {
@@ -1467,7 +1467,7 @@ object ShardCoordinator {
 */
 @SerialVersionUID(1L) final case class RegisterProxy(shardRegionProxy: ActorRef) extends CoordinatorCommand
 /**
-* Acknowledgement from `ShardCoordinator` that [[Register]] or [[RegisterProxy]] was sucessful.
+* Acknowledgement from `ShardCoordinator` that [[Register]] or [[RegisterProxy]] was successful.
 */
 @SerialVersionUID(1L) final case class RegisterAck(coordinator: ActorRef) extends CoordinatorMessage
 /**
@@ -1484,7 +1484,7 @@ object ShardCoordinator {
 */
 @SerialVersionUID(1L) final case class ShardHome(shard: ShardId, ref: ActorRef) extends CoordinatorMessage
 /**
-* `ShardCoodinator` informs a `ShardRegion` that it is hosting this shard
+* `ShardCoordinator` informs a `ShardRegion` that it is hosting this shard
 */
 @SerialVersionUID(1L) final case class HostShard(shard: ShardId) extends CoordinatorMessage
 /**
@@ -1505,7 +1505,7 @@ object ShardCoordinator {
 */
 @SerialVersionUID(1L) final case class BeginHandOffAck(shard: ShardId) extends CoordinatorCommand
 /**
-* When all `ShardRegion` actors have acknoledged the `BeginHandOff` the
+* When all `ShardRegion` actors have acknowledged the `BeginHandOff` the
 * `ShardCoordinator` sends this message to the `ShardRegion` responsible for the
 * shard. The `ShardRegion` is supposed to stop all entries in that shard and when
 * all entries have terminated reply with `ShardStopped` to the `ShardCoordinator`.
@@ -1872,7 +1872,7 @@ class ShardCoordinator(handOffTimeout: FiniteDuration, shardStartTimeout: Finite
 case ShardHome(_, _) ⇒
 //On rebalance, we send ourselves a GetShardHome message to reallocate a
-// shard. This recieve handles the "response" from that message. i.e. Ingores it.
+// shard. This receive handles the "response" from that message. i.e. ignores it.
 case ClusterShuttingDown ⇒
 log.debug("Shutting down ShardCoordinator")


@@ -88,7 +88,7 @@ object ClusterShardingSpec extends MultiNodeConfig {
 override def postStop(): Unit = {
 super.postStop()
-// Simulate that the passivation takes some time, to verify passivation bufffering
+// Simulate that the passivation takes some time, to verify passivation buffering
 Thread.sleep(500)
 }
 //#counter-actor


@@ -154,7 +154,7 @@ object ClusterSingletonManager {
 */
 private object Internal {
 /**
-* Sent from new oldest to previous oldest to initate the
+* Sent from new oldest to previous oldest to initiate the
 * hand-over process. `HandOverInProgress` and `HandOverDone`
 * are expected replies.
 */


@@ -70,7 +70,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
 }
 /**
-* This channel is extremly strict with regards to
+* This channel is extremely strict with regards to
 * registration and unregistration of consumer to
 * be able to detect misbehaviour (e.g. two active
 * singleton instances).


@@ -128,7 +128,7 @@ private[cluster] object InternalClusterAction {
 final case class PublisherCreated(publisher: ActorRef)
 /**
-* Comand to [[akka.cluster.ClusterDaemon]] to create a
+* Command to [[akka.cluster.ClusterDaemon]] to create a
 * [[akka.cluster.OnMemberStatusChangedListener]].
 */
 final case class AddOnMemberUpListener(callback: Runnable) extends NoSerializationVerificationNeeded


@@ -174,7 +174,7 @@ final case class AdaptiveLoadBalancingPool(
 def withDispatcher(dispatcherId: String): AdaptiveLoadBalancingPool = copy(routerDispatcher = dispatcherId)
 /**
-* Uses the supervisor strategy of the given Routerconfig
+* Uses the supervisor strategy of the given RouterConfig
 * if this RouterConfig doesn't have one
 */
 override def withFallback(other: RouterConfig): RouterConfig =


@@ -96,7 +96,7 @@ abstract class ClusterAccrualFailureDetectorSpec
 enterBarrier("third-shutdown")
 runOn(first, second) {
-// remaning nodes should detect failure...
+// remaining nodes should detect failure...
 awaitCond(!cluster.failureDetector.isAvailable(third), 15.seconds)
 // other connections still ok
 cluster.failureDetector.isAvailable(first) should ===(true)


@@ -343,7 +343,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
 */
 def markNodeAsUnavailable(address: Address): Unit = {
 if (isFailureDetectorPuppet) {
-// before marking it as unavailble there should be at least one heartbeat
+// before marking it as unavailable there should be at least one heartbeat
 // to create the FailureDetectorPuppet in the FailureDetectorRegistry
 cluster.failureDetector.heartbeat(address)
 failureDetectorPuppet(address) foreach (_.markNodeAsUnavailable())


@@ -94,7 +94,7 @@ $.extend($.effects, {
 // The main function to form the object for animation
 for(var n in newStyle) {
 if( typeof newStyle[n] != "function" && newStyle[n] /* No functions and null properties */
-&& n.indexOf("Moz") == -1 && n.indexOf("length") == -1 /* No mozilla spezific render properties. */
+&& n.indexOf("Moz") == -1 && n.indexOf("length") == -1 /* No mozilla specific render properties. */
 && newStyle[n] != oldStyle[n] /* Only values that have changed are used for the animation */
 && (n.match(/color/i) || (!n.match(/color/i) && !isNaN(parseInt(newStyle[n],10)))) /* Only things that can be parsed to integers or colors */
 && (oldStyle.position != "static" || (oldStyle.position == "static" && !n.match(/left|top|bottom|right/))) /* No need for positions when dealing with static positions */


@@ -63,7 +63,7 @@ abstract class ActorSystemActivator extends BundleActivator {
 val filter = s"(objectclass=${classOf[LogService].getName})"
 context.addServiceListener(logServiceListner, filter)
-//Small trick to create an event if the service is registred before this start listing for
+//Small trick to create an event if the service is registered before this start listing for
 Option(context.getServiceReference(classOf[LogService].getName)).foreach(x ⇒ {
 logServiceListner.serviceChanged(new ServiceEvent(ServiceEvent.REGISTERED, x))
 })


@@ -47,8 +47,8 @@ class DefaultOSGiLogger extends DefaultLogger {
 }
 /**
-* Behaviour of the Eventhanlder that is setup (has received a LogService)
-* @param logService registrered OSGi LogService
+* Behaviour of the Event handler that is setup (has received a LogService)
+* @param logService registered OSGi LogService
 * @return Receive : Logs LogEvent or go back to the uninitialised state
 */
 def initialisedReceive(logService: LogService): Receive = {
@@ -65,7 +65,7 @@ class DefaultOSGiLogger extends DefaultLogger {
 * Logs a message in an OSGi LogService
 *
 * @param logService OSGi LogService registered and used for logging
-* @param event akka LogEvent that is log unsing the LogService
+* @param event akka LogEvent that is logged using the LogService
 */
 def logMessage(logService: LogService, event: LogEvent) {
 event match {
@@ -79,7 +79,7 @@ class DefaultOSGiLogger extends DefaultLogger {
 }
 /**
-* Message sent when LogService is unregistred.
+* Message sent when LogService is unregistered.
 * Sent from the ActorSystemActivator to a logger (as DefaultOsgiLogger).
 */
 case object UnregisteringLogService


@@ -18,7 +18,7 @@ abstract class PluginSpec(val config: Config) extends TestKitBase with WordSpecL
 private var _pid: String = _
 // used to avoid messages be delivered to a restarted actor,
-// this is akka-persistence internals and journals themselfes don't really care
+// this is akka-persistence internals and journals themselves don't really care
 protected val actorInstanceId = 1
 override protected def beforeEach(): Unit =


@@ -20,7 +20,7 @@ import org.scalatest.junit.JUnitRunner
 * It is *NOT* meant to be a comprehensive benchmark, but rather aims to help plugin developers to easily determine
 * if their plugin's performance is roughly as expected. It also validates the plugin still works under "more messages" scenarios.
 *
-* The measurements are by default printed to `System.out`, if you want to customise this please override the [[#info]] method.
+* The measurements are by default printed to `System.out`, if you want to customize this please override the [[#info]] method.
 *
 * The benchmark iteration and message counts are easily customisable by overriding these methods:
 *
@@ -36,7 +36,7 @@ import org.scalatest.junit.JUnitRunner
 * }}}
 *
 * In case your journal plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`
-* methods (don't forget to call `super` in your overriden methods).
+* methods (don't forget to call `super` in your overridden methods).
 *
 * @see [[akka.persistence.journal.JournalSpec]]
 * @see [[akka.persistence.journal.JournalPerfSpec]]


@@ -14,7 +14,7 @@ import org.scalatest.junit.JUnitRunner
 * Java / JUnit API for [[akka.persistence.journal.JournalSpec]].
 *
 * In case your journal plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`
-* methods (don't forget to call `super` in your overriden methods).
+* methods (don't forget to call `super` in your overridden methods).
 *
 * @see [[akka.persistence.journal.JournalSpec]]
 * @see [[akka.persistence.journal.JournalPerfSpec]]


@@ -15,7 +15,7 @@ import org.scalatest.junit.JUnitRunner
 * Plugin authors are highly encouraged to include it in their plugin's test suites.
 *
 * In case your snapshot-store plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`
-* methods (don't forget to call `super` in your overriden methods).
+* methods (don't forget to call `super` in your overridden methods).
 *
 * @see [[akka.persistence.snapshot.SnapshotStoreSpec]]
 */


@@ -69,7 +69,7 @@ object JournalPerfSpec {
 * if their plugin's performance is roughly as expected. It also validates the plugin still works under "more messages" scenarios.
 *
 * In case your journal plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`
-* methods (don't forget to call `super` in your overriden methods).
+* methods (don't forget to call `super` in your overridden methods).
 *
 * For a Java and JUnit consumable version of the TCK please refer to [[akka.persistence.japi.journal.JavaJournalPerfSpec]].
 *
@@ -87,7 +87,7 @@ abstract class JournalPerfSpec(config: Config) extends JournalSpec(config) {
 testProbe.expectMsg(awaitDuration, cmnds.last)
 }
-/** Executes a block of code multiple times (no warmup) */
+/** Executes a block of code multiple times (no warm-up) */
 def measure(msg: Duration ⇒ String)(block: ⇒ Unit): Unit = {
 val measurements = Array.ofDim[Duration](measurementIterations)
 var i = 0
@@ -112,7 +112,7 @@ abstract class JournalPerfSpec(config: Config) extends JournalSpec(config) {
 /** Override in order to customize timeouts used for expectMsg, in order to tune the awaits to your journal's perf */
 private def awaitDuration: FiniteDuration = awaitDurationMillis.millis
-/** Numbe of messages sent to the PersistentActor under test for each test iteration */
+/** Number of messages sent to the PersistentActor under test for each test iteration */
 def eventsCount: Int = 10 * 1000
 /** Number of measurement iterations each test will be run. */


@@ -21,7 +21,7 @@ object JournalSpec {
 * Plugin authors are highly encouraged to include it in their plugin's test suites.
 *
 * In case your journal plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`
-* methods (don't forget to call `super` in your overriden methods).
+* methods (don't forget to call `super` in your overridden methods).
 *
 * For a Java and JUnit consumable version of the TCK please refer to [[akka.persistence.japi.journal.JavaJournalSpec]].
 *


@@ -17,7 +17,7 @@ object SnapshotStoreSpec {
 * Plugin authors are highly encouraged to include it in their plugin's test suites.
 *
 * In case your journal plugin needs some kind of setup or teardown, override the `beforeAll` or `afterAll`
-* methods (don't forget to call `super` in your overriden methods).
+* methods (don't forget to call `super` in your overridden methods).
 *
 * For a Java and JUnit consumable version of the TCK please refer to [[akka.persistence.japi.snapshot.JavaSnapshotStoreSpec]].
 *


@@ -25,7 +25,7 @@ private[persistence] object Eventsourced {
 def evt: Any
 def handler: Any ⇒ Unit
 }
-/** forces actor to stash incoming commands untill all these invocations are handled */
+/** forces actor to stash incoming commands until all these invocations are handled */
 private final case class StashingHandlerInvocation(evt: Any, handler: Any ⇒ Unit) extends PendingHandlerInvocation
 /** does not force the actor to stash commands; Originates from either `persistAsync` or `defer` calls */
 private final case class AsyncHandlerInvocation(evt: Any, handler: Any ⇒ Unit) extends PendingHandlerInvocation


@@ -111,7 +111,7 @@ abstract class RemoteNodeShutdownAndComesBackSpec
 expectTerminated(subject)
 // Establish watch with the new system. This triggers additional system message traffic. If buffers are out
-// of synch the remote system will be quarantined and the rest of the test will fail (or even in earlier
+// of sync the remote system will be quarantined and the rest of the test will fail (or even in earlier
 // stages depending on circumstances).
 system.actorSelection(RootActorPath(secondAddress) / "user" / "subject") ! Identify("subject")
 val subjectNew = expectMsgType[ActorIdentity].ref.get


@@ -89,8 +89,8 @@ abstract class Ticket15109Spec extends MultiNodeSpec(Ticket15109Spec)
 enterBarrier("actor-identified")
 runOn(second) {
-// Force a dissassociation. Using the message Shutdown, which is suboptimal here, but this is the only
-// DisassoicateInfo that triggers the code-path we want to test
+// Force a disassociation. Using the message Shutdown, which is suboptimal here, but this is the only
+// DisassociateInfo that triggers the code-path we want to test
 Await.result(RARP(system).provider.transport.managementCommand(
 ForceDisassociateExplicitly(node(first).address, AssociationHandle.Shutdown)), 3.seconds)
 }


@@ -134,7 +134,7 @@ class RemoteRoundRobinSpec extends MultiNodeSpec(RemoteRoundRobinMultiJvmSpec)
 actor.isInstanceOf[RoutedActorRef] should ===(true)
 actor ! GetRoutees
-// initial nrOfInstances 1 + inital resize => 2
+// initial nrOfInstances 1 + initial resize => 2
 expectMsgType[Routees].routees.size should ===(2)
 val repliesFrom: Set[ActorRef] =


@@ -703,7 +703,7 @@ private[remote] class EndpointWriter(
 val backoffNanos = backoffDeadlinelineNanoTime - System.nanoTime
 if (backoffNanos > 0) {
 LockSupport.parkNanos(backoffNanos)
-// parkNanos allows for spurious wakeup, check again
+// parkNanos allows for spurious wake-up, check again
 backoff()
 }
 }
@@ -791,7 +791,7 @@ private[remote] class EndpointWriter(
 case Some(r) ⇒
 r.tell(s, replyTo)
 case None ⇒
-// initalizing, buffer and take care of it later when buffer is sent
+// initializing, buffer and take care of it later when buffer is sent
 enqueueInBuffer(s)
 }
 case TakeOver(newHandle, replyTo) ⇒


@@ -32,7 +32,7 @@ trait FailureDetectorRegistry[A] {
 /**
 * Records a heartbeat for a resource. If the resource is not yet registered (i.e. this is the first heartbeat) then
-* it is automatially registered.
+* it is automatically registered.
 */
 def heartbeat(resource: A): Unit


@@ -186,7 +186,7 @@ private[akka] object HeartbeatHistory {
 * Create an empty HeartbeatHistory, without any history.
 * Can only be used as starting point for appending intervals.
 * The stats (mean, variance, stdDeviation) are not defined for
-* for empty HeartbeatHistory, i.e. throws AritmeticException.
+* for empty HeartbeatHistory, i.e. throws ArithmeticException.
 */
 def apply(maxSampleSize: Int): HeartbeatHistory = HeartbeatHistory(
 maxSampleSize = maxSampleSize,
@@ -201,7 +201,7 @@ private[akka] object HeartbeatHistory {
 * It is capped by the number of samples specified in `maxSampleSize`.
 *
 * The stats (mean, variance, stdDeviation) are not defined for
-* for empty HeartbeatHistory, i.e. throws AritmeticException.
+* for empty HeartbeatHistory, i.e. throws ArithmeticException.
 */
 private[akka] final case class HeartbeatHistory private (
 maxSampleSize: Int,


@@ -47,7 +47,7 @@ private[akka] abstract class RemoteTransport(val system: ExtendedActorSystem, va
 def addresses: immutable.Set[Address]
 /**
-* The default transport address of the actorsystem
+* The default transport address of the ActorSystem
 * @return The listen address of the default transport
 */
 def defaultAddress: Address


@@ -68,7 +68,7 @@ private[akka] object RemoteWatcher {
 * For a new node to be watched this actor periodically sends `RemoteWatcher.Heartbeat`
 * to the peer actor on the other node, which replies with [[RemoteWatcher.HeartbeatRsp]]
 * message back. The failure detector on the watching side monitors these heartbeat messages.
-* If arrival of hearbeat messages stops it will be detected and this actor will publish
+* If arrival of heartbeat messages stops it will be detected and this actor will publish
 * [[akka.actor.AddressTerminated]] to the [[akka.event.AddressTerminatedTopic]].
 *
 * When all actors on a node have been unwatched it will stop sending heartbeat messages.


@@ -16,7 +16,7 @@ import scala.reflect.ClassTag
 import util.{ Failure, Success }
 /**
-* Serializes akka's internal DaemonMsgCreate using protobuf
+* Serializes Akka's internal DaemonMsgCreate using protobuf
 * for the core structure of DaemonMsgCreate, Props and Deploy.
 * Serialization of contained RouterConfig, Config, and Scope
 * is done with configured serializer for those classes, by


@@ -20,7 +20,7 @@ import scala.concurrent.ExecutionContext.Implicits.global
 *
 * The TestTransport is basically a shared memory between actor systems. The TestTransport could be programmed to
 * emulate different failure modes of a Transport implementation. TestTransport keeps a log of the activities it was
-* requested to do. This class is not optimized for performace and MUST not be used as an in-memory transport in
+* requested to do. This class is not optimized for performance and MUST not be used as an in-memory transport in
 * production systems.
 */
 class TestTransport(
@@ -177,7 +177,7 @@ object TestTransport {
 /**
 * Test utility to make behavior of functions that return some Future[B] controllable from tests. This tool is able
 * to overwrite default behavior with any generic behavior, including failure, and exposes control to the timing of
-* the completition of the returned future.
+* the completion of the returned future.
 *
 * The utility is implemented as a stack of behaviors, where the behavior on the top of the stack represents the
 * currently active behavior. The bottom of the stack always contains the defaultBehavior which can not be popped


@@ -145,13 +145,13 @@ object ThrottlerTransportAdapter {
 }
 /**
-* Management Command to force dissocation of an address.
+* Management Command to force disassociation of an address.
 */
 @SerialVersionUID(1L)
 final case class ForceDisassociate(address: Address)
 /**
-* Management Command to force dissocation of an address with an explicit error.
+* Management Command to force disassociation of an address with an explicit error.
 */
 @SerialVersionUID(1L)
 final case class ForceDisassociateExplicitly(address: Address, reason: DisassociateInfo)


@@ -432,7 +432,7 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA
 } catch {
 case NonFatal(e) ⇒ {
 log.error("failed to bind to {}, shutting down Netty transport", address)
-try { shutdown() } catch { case NonFatal(e) ⇒ } // ingore possible exception during shutdown
+try { shutdown() } catch { case NonFatal(e) ⇒ } // ignore possible exception during shutdown
 throw e
 }
 }


@@ -81,7 +81,7 @@ abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSamp
 val transformationFrontend = system.actorOf(Props[TransformationFrontend], name = "frontend")
 transformationFrontend ! new TransformationJob("hello")
 expectMsgPF() {
-// no backends yet, service unavailble
+// no backends yet, service unavailable
 case f: JobFailed =>
 }
 }


@@ -79,7 +79,7 @@ abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSamp
 val transformationFrontend = system.actorOf(Props[TransformationFrontend], name = "frontend")
 transformationFrontend ! TransformationJob("hello")
 expectMsgPF() {
-// no backends yet, service unavailble
+// no backends yet, service unavailable
 case JobFailed(_, TransformationJob("hello")) =>
 }
 }


@@ -54,7 +54,7 @@ public class DiningHakkersOnFsm {
 event((event, data) -> (event == Put) && (data.hakker == sender()), (event, data) ->
 goTo(CS.Available).using(new TakenBy(context().system().deadLetters()))));
-// Initialze the chopstick
+// Initialize the chopstick
 initialize();
 }
 }


@@ -46,7 +46,7 @@ class Chopstick extends Actor {
 }
 /*
-* A hakker is an awesome dude or dudett who either thinks about hacking or has to eat ;-)
+* A hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-)
 */
 class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor {


@@ -53,7 +53,7 @@ class Chopstick extends Actor with FSM[ChopstickState, TakenBy] {
 goto(Available) using TakenBy(system.deadLetters)
 }
-// Initialze the chopstick
+// Initialize the chopstick
 initialize()
 }


@@ -46,7 +46,7 @@ class Chopstick extends Actor {
 }
 /*
-* A hakker is an awesome dude or dudett who either thinks about hacking or has to eat ;-)
+* A hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-)
 */
 class Hakker(name: String, chair: Int) extends Actor {


@@ -105,7 +105,7 @@ class TestActorRef[T <: Actor](
 def watch(subject: ActorRef): ActorRef = underlying.watch(subject)
 /**
-* Deregisters this actor from being a death monitor of the provided ActorRef
+* Unregisters this actor from being a death monitor of the provided ActorRef
 * This means that this actor will not get a Terminated()-message when the provided actor
 * is permanently terminated.
 *


@@ -20,7 +20,7 @@ import akka.util.BoxedType
 /**
 * Implementation helpers of the EventFilter facilities: send `Mute`
 * to the TestEventListener to install a filter, and `UnMute` to
-* deinstall it.
+* uninstall it.
 *
 * You should always prefer the filter methods in the package object
 * (see [[akka.testkit]] `filterEvents` and `filterException`) or on the
@@ -31,7 +31,7 @@ sealed trait TestEvent
 /**
 * Implementation helpers of the EventFilter facilities: send <code>Mute</code>
 * to the TestEventFilter to install a filter, and <code>UnMute</code> to
-* deinstall it.
+* uninstall it.
 *
 * You should always prefer the filter methods in the package object
 * (see [[akka.testkit]] `filterEvents` and `filterException`) or on the


@@ -13,7 +13,7 @@ import akka.dispatch.MailboxType
 import scala.reflect.ClassTag
 /**
-* This is a specialised form of the TestActorRef with support for querying and
+* This is a specialized form of the TestActorRef with support for querying and
 * setting the state of a FSM. Use a LoggingFSM with this class if you also
 * need to inspect event traces.
 *


@@ -389,7 +389,7 @@ trait TestKitBase {
 * partial function matches and returns false. Use it to ignore certain
 * messages while waiting for a specific message.
 *
-* @return the last received messsage, i.e. the first one for which the
+* @return the last received message, i.e. the first one for which the
 * partial function returned true
 */
 def fishForMessage(max: Duration = Duration.Undefined, hint: String = "")(f: PartialFunction[Any, Boolean]): Any = {


@@ -8,7 +8,7 @@ import java.util.concurrent.atomic.AtomicLong
 import java.util.concurrent.TimeUnit._
 /**
-* Specialised "one-shot" Timer.
+* Specialized "one-shot" Timer.
 * Given a known number of operations performed within a time span (to be measured) it displays the average time one operation took.
 *
 * Please note that this is a *very coarse* estimation; The gain though is that we do not have to perform the counting inside of the measured thing (we can adding in tight loops).


@@ -27,7 +27,7 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
 /**
 * Used to measure timing of known number of operations over time.
-* While not being the most percise, it allows to measure a coarse op/s without injecting counters to the measured operation (potentially hot-loop).
+* While not being the most precise, it allows to measure a coarse op/s without injecting counters to the measured operation (potentially hot-loop).
 *
 * Do not use for short running pieces of code.
 */


@@ -104,7 +104,7 @@ final case class PostRestart(failure: Throwable) extends Signal
 *
 * <b>IMPORTANT NOTE:</b> if the actor terminated by switching to the
 * `Stopped` behavior then this signal will be ignored (i.e. the
-* Stopped behvavior will do nothing in reaction to it).
+* Stopped behavior will do nothing in reaction to it).
 */
 @SerialVersionUID(1L)
 final case object PostStop extends Signal


@@ -53,7 +53,7 @@ object Receptionist {
 final case class Find[T](key: ServiceKey[T])(val replyTo: ActorRef[Listing[T]]) extends Command
 /**
-* Confirmtion that the given [[akka.typed.ActorRef]] has been associated with the [[ServiceKey]].
+* Confirmation that the given [[akka.typed.ActorRef]] has been associated with the [[ServiceKey]].
 */
 final case class Registered[T](key: ServiceKey[T], address: ActorRef[T])
 /**


@@ -48,7 +48,7 @@ object OSGi {
 val osgiOptionalImports = Seq(
 // needed because testkit is normally not used in the application bundle,
 // but it should still be included as transitive dependency and used by BundleDelegatingClassLoader
-// to be able to find refererence.conf
+// to be able to find reference.conf
 "akka.testkit",
 "com.google.protobuf")


@@ -41,7 +41,7 @@ object Sample {
 val dependencies = buildDependencies.value
 val classpathWithProjectDependencies = dependencies.classpath.map {
 case (proj, deps) if proj.project == project.id =>
-// add project dependency for every akka library dependnecy
+// add project dependency for every akka library dependency
 (proj, deps ++ projectDependencies.map(ResolvedClasspathDependency(_, None)))
 case (project, deps) => (project, deps)
 }