update more akka refs to use pekko name (#200)

PJ Fanning 2023-02-17 10:49:40 +01:00 committed by GitHub
parent 79b0189d70
commit c32fcf1f0a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
69 changed files with 139 additions and 137 deletions

View file

@ -27,7 +27,7 @@ import pekko.testkit.TestKit
class DispatcherShutdownSpec extends AnyWordSpec with Matchers {
"akka dispatcher" should {
"pekko dispatcher" should {
"eventually shutdown when used after system terminate" in {

View file

@ -73,7 +73,7 @@ object Terminated {
* once. Registration does not need to happen before the Actor terminates, a
* notification is guaranteed to arrive after both registration and termination
* have occurred. This message is also sent when the watched actor is on a node
* that has been removed from the cluster when using Akka Cluster.
* that has been removed from the cluster when using Pekko Cluster.
*
* @param ref Scala API: the `ActorRef` for the terminated actor
*/

View file

@ -180,7 +180,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvider
/**
* Register for [[Terminated]] notification once the Actor identified by the
* given [[ActorRef]] terminates. This message is also sent when the watched actor
* is on a node that has been removed from the cluster when using Akka Cluster.
* is on a node that has been removed from the cluster when using Pekko Cluster.
*
* `watch` is idempotent if it is not mixed with `watchWith`.
*
@ -195,7 +195,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvider
/**
* Register for termination notification with a custom message once the Actor identified by the
* given [[ActorRef]] terminates. This message is also sent when the watched actor
* is on a node that has been removed from the cluster when using Akka Cluster.
* is on a node that has been removed from the cluster when using Pekko Cluster.
*
* `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`.
*
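As context for the `watch` / `watchWith` docs above, a minimal typed sketch (illustrative only, not part of this commit; the worker and message names are made up):

import org.apache.pekko.actor.typed.{ ActorRef, Behavior }
import org.apache.pekko.actor.typed.scaladsl.Behaviors

object WatchExample {
  sealed trait Command
  case object WorkerStopped extends Command // custom watchWith notification

  def apply(): Behavior[Command] =
    Behaviors.setup { context =>
      val worker: ActorRef[String] = context.spawn(Behaviors.ignore[String], "worker")
      // Deliver WorkerStopped instead of the Terminated signal when the worker stops,
      // including when its node is removed from the cluster under Pekko Cluster.
      context.watchWith(worker, WorkerStopped)
      Behaviors.receiveMessage { case WorkerStopped =>
        context.log.info("worker terminated")
        Behaviors.same
      }
    }
}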

View file

@ -163,7 +163,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvider
/**
* Register for [[pekko.actor.typed.Terminated]] notification once the Actor identified by the
* given [[ActorRef]] terminates. This message is also sent when the watched actor
* is on a node that has been removed from the cluster when using Akka Cluster.
* is on a node that has been removed from the cluster when using Pekko Cluster.
*
* `watch` is idempotent if it is not mixed with `watchWith`.
*
@ -178,7 +178,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvider
/**
* Register for termination notification with a custom message once the Actor identified by the
* given [[ActorRef]] terminates. This message is also sent when the watched actor
* is on a node that has been removed from the cluster when using using Akka Cluster.
* is on a node that has been removed from the cluster when using Pekko Cluster.
*
* `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`.
*

View file

@ -18,8 +18,8 @@ import scala.concurrent.ExecutionContext;
import scala.concurrent.duration.FiniteDuration;
/**
* An Akka scheduler service. This one needs one special behavior: if Closeable, it MUST execute all
* outstanding tasks upon .close() in order to properly shutdown all dispatchers.
* An Apache Pekko scheduler service. This one needs one special behavior: if Closeable, it MUST
* execute all outstanding tasks upon .close() in order to properly shut down all dispatchers.
*
* <p>Furthermore, this timer service MUST throw IllegalStateException if it cannot schedule a task.
* Once scheduled, the task MUST be executed. If executed upon close(), the task may execute before

View file

@ -17,7 +17,7 @@ import java.lang.annotation.*;
/**
* Marks APIs that are designed under a closed-world assumption and are NOT meant to be
* extended by user-code. It is fine to extend these classes within Akka itself however.
* extended by user-code. It is fine to extend these classes within Apache Pekko itself, however.
*
* <p>This is most useful for binary compatibility purposes when a set of classes and interfaces
* assume a "closed world" between them, and gain the ability to add methods to the interfaces
@ -26,7 +26,7 @@ import java.lang.annotation.*;
/ artifact, it is impossible to obtain an "old" class with a "new" interface, as they are part of
* the same dependency.
*
* <p>Notable examples of such API include the FlowOps trait in Akka Streams or Akka HTTP model
* <p>Notable examples of such API include the FlowOps trait in Pekko Streams or Pekko HTTP model
* interfaces, which extensively use inheritance internally, but are not meant for extension by
* user code.
*/

View file

@ -16,8 +16,8 @@ package org.apache.pekko.annotation;
import java.lang.annotation.*;
/**
* Marks APIs that are considered internal to Akka and may change at any point in time without any
* warning.
* Marks APIs that are considered internal to Apache Pekko and may change at any point in time
* without any warning.
*
* <p>For example, this annotation should be used when the Scala {@code private[pekko]} access
* restriction is used, as Java has no way of representing this package restricted access and such

View file

@ -20,9 +20,9 @@ import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Marks APIs that are considered internal to Akka and should not be accessed by user code but that
* are used across Akka project boundaries and therefore shouldn't be changed without considering
* possible usage outside of the Akka core modules.
* Marks APIs that are considered internal to Apache Pekko and should not be accessed by user code
* but that are used across Apache Pekko project boundaries and therefore shouldn't be changed
* without considering possible usage outside of the Apache Pekko core modules.
*
* <p>If a method/class annotated with this annotation is part of a public API and has the Scala
* {@code private[pekko]} access restriction, which leads to a public method from Java, there should

View file

@ -16,7 +16,7 @@ package org.apache.pekko
/**
* This type is used in generic type signatures wherever the actual value is of no importance.
* It is a combination of Scala's `Unit` and Java's `Void`, which both have different issues when
* used from the other language. An example use-case is the materialized value of an Akka Stream for cases
* used from the other language. An example use-case is the materialized value of a Pekko Stream for cases
* where no result shall be returned from materialization.
*/
sealed abstract class NotUsed
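For context, `NotUsed` shows up as the materialized value of sources and flows that return nothing useful from materialization; a small sketch (assumes pekko-stream on the classpath):

import org.apache.pekko.NotUsed
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{ Sink, Source }

object NotUsedExample extends App {
  implicit val system: ActorSystem = ActorSystem("demo")
  // The Source's materialized value is NotUsed: running it yields no result of its own
  val numbers: Source[Int, NotUsed] = Source(1 to 3)
  numbers.runWith(Sink.foreach(println))
}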

View file

@ -36,7 +36,7 @@ trait OnlyCauseStackTrace { self: Throwable =>
}
/**
* This exception is thrown when Akka detects a problem with the provided configuration
* This exception is thrown when Apache Pekko detects a problem with the provided configuration
*/
class ConfigurationException(message: String, cause: Throwable) extends PekkoException(message, cause) {
def this(msg: String) = this(msg, null)

View file

@ -384,7 +384,7 @@ trait DiagnosticActorLogging extends Actor {
object Actor {
/**
* Type alias representing a Receive-expression for Akka Actors.
* Type alias representing a Receive-expression for Pekko Actors.
*/
// #receive
type Receive = PartialFunction[Any, Unit]
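For context, the alias in a minimal classic actor (illustrative sketch, not from this commit):

import org.apache.pekko.actor.{ Actor, ActorSystem, Props }

// Receive is just an alias for PartialFunction[Any, Unit]
class Echo extends Actor {
  def receive: Actor.Receive = {
    case msg => sender() ! msg
  }
}

object ReceiveExample extends App {
  val system = ActorSystem("demo")
  val echo = system.actorOf(Props[Echo](), "echo")
  echo ! "hello" // the reply goes to deadLetters here; this just shows the wiring
}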

View file

@ -367,7 +367,7 @@ private[pekko] trait Cell {
}
/**
* Everything in here is completely Akka PRIVATE. You will not find any
* Everything in here is completely Pekko PRIVATE. You will not find any
* supported APIs in this place. This is not the API you were looking
* for! (waves hand)
*/
@ -412,7 +412,7 @@ private[pekko] object ActorCell {
//vars don't need volatile since it's protected with the mailbox status
//Make sure that they are not read/written outside of a message processing (systemInvoke/invoke)
/**
* Everything in here is completely Akka PRIVATE. You will not find any
* Everything in here is completely Pekko PRIVATE. You will not find any
* supported APIs in this place. This is not the API you were looking
* for! (waves hand)
*/

View file

@ -26,11 +26,11 @@ import pekko.event.LogMarker
object ActorLogMarker {
/**
* Marker "akkaDeadLetter" of log event for dead letter messages.
* Marker "pekkoDeadLetter" of log event for dead letter messages.
*
* @param messageClass The message class of the DeadLetter. Included as property "akkaMessageClass".
* @param messageClass The message class of the DeadLetter. Included as property "pekkoMessageClass".
*/
def deadLetter(messageClass: String): LogMarker =
LogMarker("akkaDeadLetter", Map(LogMarker.Properties.MessageClass -> messageClass))
LogMarker("pekkoDeadLetter", Map(LogMarker.Properties.MessageClass -> messageClass))
}
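To show where the renamed marker surfaces, a sketch using the marker-aware logging adapter (the log source string and property value are made up):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.event.{ LogMarker, Logging }

object MarkerExample extends App {
  val system = ActorSystem("demo")
  // MarkerLoggingAdapter accepts a LogMarker per log statement
  val log = Logging.withMarker(system, "MarkerExample")
  val marker = LogMarker("pekkoDeadLetter", Map("pekkoMessageClass" -> "java.lang.String"))
  log.info(marker, "dead letter observed")
  system.terminate()
}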

View file

@ -35,7 +35,7 @@ import pekko.util.OptionVal
/**
* Interface for all ActorRef providers to implement.
* Not intended for extension outside of Akka.
* Not intended for extension outside of Apache Pekko.
*/
@DoNotInherit trait ActorRefProvider {

View file

@ -484,7 +484,7 @@ object ActorSystem {
if (ConfigVersion != Version)
throw new pekko.ConfigurationException(
"Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]")
"Pekko JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]")
/**
* Returns the String representation of the Config that this Settings is backed by

View file

@ -308,7 +308,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits {
}
/**
* An Akka SupervisorStrategy is the policy to apply for crashing children.
* A Pekko SupervisorStrategy is the policy to apply for crashing children.
*
* <b>IMPORTANT:</b>
*
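For a concrete picture of such a policy, a classic `OneForOneStrategy` declaration (a sketch, not part of this commit):

import org.apache.pekko.actor.{ Actor, OneForOneStrategy, SupervisorStrategy }
import org.apache.pekko.actor.SupervisorStrategy.{ Restart, Resume, Stop }
import scala.concurrent.duration._

class Parent extends Actor {
  // Policy applied to this actor's crashing children, keyed on the thrown exception
  override val supervisorStrategy: SupervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
      case _: ArithmeticException      => Resume
      case _: IllegalArgumentException => Stop
      case _: Exception                => Restart
    }

  def receive: Actor.Receive = { case _ => }
}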

View file

@ -33,7 +33,7 @@ import pekko.util.JavaDurationConverters
private final case class SchedulerException(msg: String) extends pekko.PekkoException(msg) with NoStackTrace
/**
* An Akka scheduler service.
* An Apache Pekko scheduler service.
*
* For scheduling within actors `with Timers` should be preferred.
*
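The doc above prefers `with Timers` for scheduling inside actors; a minimal sketch (timer key and message are arbitrary):

import org.apache.pekko.actor.{ Actor, Timers }
import scala.concurrent.duration._

class PeriodicActor extends Actor with Timers {
  // The timer is tied to the actor's lifecycle: stopped automatically with the actor
  timers.startTimerWithFixedDelay("tick", "Tick", 1.second)

  def receive: Actor.Receive = {
    case "Tick" => // do the periodic work
  }
}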

View file

@ -228,7 +228,7 @@ private[pekko] class Mailboxes(
case "unbounded" => UnboundedMailbox()
case "bounded" => new BoundedMailbox(settings, config(id))
case _ if id.startsWith(BoundedCapacityPrefix) =>
// hack to allow programmatic set of capacity through props in akka-typed but still share
// hack to allow programmatic setting of the capacity through props in pekko-typed but still share
// mailbox configurators for the same size
val capacity = id.split(':')(1).toInt
new BoundedMailbox(capacity, Duration.Zero)

View file

@ -23,7 +23,7 @@ import pekko.event.Logging.simpleName
import pekko.util.Subclassification
/**
* An Akka EventStream is a pub-sub stream of events both system and user generated,
* An Apache Pekko EventStream is a pub-sub stream of events both system and user generated,
* where subscribers are ActorRefs and the channels are Classes and Events are any java.lang.Object.
* EventStreams employ SubchannelClassification, which means that if you listen to a Class,
* you'll receive any message that is of that type or a subtype.
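A small sketch of the pub-sub behavior described above (illustrative only):

import org.apache.pekko.actor.{ Actor, ActorSystem, Props }

class Listener extends Actor {
  def receive: Actor.Receive = { case msg => println(s"saw: $msg") }
}

object EventStreamExample extends App {
  val system = ActorSystem("demo")
  val listener = system.actorOf(Props[Listener](), "listener")
  // Subchannel classification: subscribing to a class also matches its subtypes
  system.eventStream.subscribe(listener, classOf[String])
  system.eventStream.publish("hello") // delivered to listener
}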

View file

@ -1731,9 +1731,9 @@ object LogMarker {
* INTERNAL API
*/
@InternalApi private[pekko] object Properties {
val MessageClass = "akkaMessageClass"
val RemoteAddress = "akkaRemoteAddress"
val RemoteAddressUid = "akkaRemoteAddressUid"
val MessageClass = "pekkoMessageClass"
val RemoteAddress = "pekkoRemoteAddress"
val RemoteAddressUid = "pekkoRemoteAddressUid"
}
}

View file

@ -19,9 +19,9 @@ import pekko.actor.Actor
/**
* Whereas it is possible to plug in alternative DNS implementations, it is not recommended.
*
* It is expected that this will be deprecated/removed in future Akka versions
* It is expected that this will be deprecated/removed in future Apache Pekko versions
*
* TODO make private and remove deprecated in 2.7.0
* TODO make private and remove deprecated in v1.1.0
*/
@deprecated("Overriding the DNS implementation will be removed in future versions of Akka", "2.6.0")
trait DnsProvider {

View file

@ -141,7 +141,7 @@ object JavaPartialFunction {
* }
* }}}
*
* The typical use of partial functions from Akka looks like the following:
* The typical use of partial functions from Apache Pekko looks like the following:
*
* {{{
* if (pf.isDefinedAt(x)) {

View file

@ -57,8 +57,8 @@ object Backoff {
* be restarted with an exponentially increasing back off until the resource is available again.
*
* '''***
* This supervisor should not be used with `Akka Persistence` child actors.
* `Akka Persistence` actors shutdown unconditionally on `persistFailure()`s rather
* This supervisor should not be used with `Pekko Persistence` child actors.
* `Pekko Persistence` actors shut down unconditionally on `persistFailure()`s rather
* than throw an exception on a failure like normal actors.
* [[#onStop]] should be used instead for cases where the child actor
* terminates itself as a failure signal instead of the normal behavior of throwing an exception.
@ -117,8 +117,8 @@ object Backoff {
* be restarted with an exponentially increasing back off until the resource is available again.
*
* '''***
* This supervisor should not be used with `Akka Persistence` child actors.
* `Akka Persistence` actors shutdown unconditionally on `persistFailure()`s rather
* This supervisor should not be used with `Pekko Persistence` child actors.
* `Pekko Persistence` actors shut down unconditionally on `persistFailure()`s rather
* than throw an exception on a failure like normal actors.
* [[#onStop]] should be used instead for cases where the child actor
* terminates itself as a failure signal instead of the normal behavior of throwing an exception.
@ -172,8 +172,8 @@ object Backoff {
* be restarted with an exponentially increasing back off until the resource is available again.
*
* '''***
* This supervisor should not be used with `Akka Persistence` child actors.
* `Akka Persistence` actors shutdown unconditionally on `persistFailure()`s rather
* This supervisor should not be used with `Pekko Persistence` child actors.
* `Pekko Persistence` actors shut down unconditionally on `persistFailure()`s rather
* than throw an exception on a failure like normal actors.
* [[#onStop]] should be used instead for cases where the child actor
* terminates itself as a failure signal instead of the normal behavior of throwing an exception.
@ -232,8 +232,8 @@ object Backoff {
* be restarted with an exponentially increasing back off until the resource is available again.
*
* '''***
* This supervisor should not be used with `Akka Persistence` child actors.
* `Akka Persistence` actors shutdown unconditionally on `persistFailure()`s rather
* This supervisor should not be used with `Pekko Persistence` child actors.
* `Pekko Persistence` actors shut down unconditionally on `persistFailure()`s rather
* than throw an exception on a failure like normal actors.
* [[#onStop]] should be used instead for cases where the child actor
* terminates itself as a failure signal instead of the normal behavior of throwing an exception.

View file

@ -53,8 +53,8 @@ object BackoffOpts {
* be restarted with an exponentially increasing back off until the resource is available again.
*
* '''***
* This supervisor should not be used with `Akka Persistence` child actors.
* `Akka Persistence` actors shutdown unconditionally on `persistFailure()`s rather
* This supervisor should not be used with `Pekko Persistence` child actors.
* `Pekko Persistence` actors shut down unconditionally on `persistFailure()`s rather
* than throw an exception on a failure like normal actors.
* [[#onStop]] should be used instead for cases where the child actor
* terminates itself as a failure signal instead of the normal behavior of throwing an exception.
@ -107,8 +107,8 @@ object BackoffOpts {
* be restarted with an exponentially increasing back off until the resource is available again.
*
* '''***
* This supervisor should not be used with `Akka Persistence` child actors.
* `Akka Persistence` actors shutdown unconditionally on `persistFailure()`s rather
* This supervisor should not be used with `Pekko Persistence` child actors.
* `Pekko Persistence` actors shut down unconditionally on `persistFailure()`s rather
* than throw an exception on a failure like normal actors.
* [[#onStop]] should be used instead for cases where the child actor
* terminates itself as a failure signal instead of the normal behavior of throwing an exception.
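For context, wiring up such a supervisor with `BackoffOpts.onStop` might look like this (a sketch; the `Worker` child and the backoff values are assumptions):

import org.apache.pekko.actor.{ Actor, ActorSystem, Props }
import org.apache.pekko.pattern.{ BackoffOpts, BackoffSupervisor }
import scala.concurrent.duration._

class Worker extends Actor { // hypothetical child guarding a flaky resource
  def receive: Actor.Receive = { case _ => }
}

object BackoffExample extends App {
  val system = ActorSystem("demo")
  // Restart the child with exponential backoff each time it stops
  val supervisorProps = BackoffSupervisor.props(
    BackoffOpts.onStop(
      Props[Worker](),
      childName = "worker",
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2))
  system.actorOf(supervisorProps, "workerSupervisor")
}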

View file

@ -15,7 +15,7 @@ package org.apache.pekko
package pattern
/**
* == Extended Versions Of Akka Patterns ==
* == Extended Versions Of Pekko Patterns ==
*
* This subpackage contains extended versions of Akka patterns.
*

View file

@ -104,7 +104,7 @@ object Serializers {
* For serialization of data that need to evolve over time the `SerializerWithStringManifest` is recommended instead
* of [[Serializer]] because the manifest (type hint) is a `String` instead of a `Class`. That means
* that the class can be moved/removed and the serializer can still deserialize old data by matching
* on the `String`. This is especially useful for Akka Persistence.
* on the `String`. This is especially useful for Pekko Persistence.
*
* The manifest string can also encode a version number that can be used in `fromBinary` to
* deserialize in different ways to migrate old data to new domain objects.
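As a broader illustration of the string-manifest approach described above, a hypothetical serializer (event type, manifest string, and identifier are all made up):

import java.nio.charset.StandardCharsets.UTF_8
import org.apache.pekko.serialization.SerializerWithStringManifest

final case class CustomerRegistered(name: String) // hypothetical event

class CustomerSerializer extends SerializerWithStringManifest {
  override def identifier: Int = 9001 // must be unique among serializers
  private val CustomerManifest = "customer-v1"

  override def manifest(o: AnyRef): String = o match {
    case _: CustomerRegistered => CustomerManifest
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    case CustomerRegistered(name) => name.getBytes(UTF_8)
  }

  // Matching on the manifest string lets the event class move or be renamed
  // without breaking deserialization of old data
  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    case CustomerManifest => CustomerRegistered(new String(bytes, UTF_8))
  }
}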
@ -179,7 +179,7 @@ abstract class SerializerWithStringManifest extends Serializer {
* Implementations should typically extend [[SerializerWithStringManifest]] and
* in addition to the `ByteBuffer` based `toBinary` and `fromBinary` methods also
* implement the array based `toBinary` and `fromBinary` methods. The array based
* methods will be used when `ByteBuffer` is not used, e.g. in Akka Persistence.
* methods will be used when `ByteBuffer` is not used, e.g. in Pekko Persistence.
*
* Note that the array based methods can for example be implemented by delegation
* like this:

View file

@ -31,7 +31,7 @@ import pekko.actor.ExtensionIdProvider
import pekko.event.Logging
/**
* Akka extension that extracts [[ManifestInfo.Version]] information from META-INF/MANIFEST.MF in jar files
* Apache Pekko extension that extracts [[ManifestInfo.Version]] information from META-INF/MANIFEST.MF in jar files
* on the classpath of the `ClassLoader` of the `ActorSystem`.
*/
object ManifestInfo extends ExtensionId[ManifestInfo] with ExtensionIdProvider {
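For context, reading the collected versions (a sketch assuming the extension's `versions` map):

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.util.ManifestInfo

object ManifestExample extends App {
  val system = ActorSystem("demo")
  // Module name -> version, read from META-INF/MANIFEST.MF entries on the classpath
  ManifestInfo(system).versions.foreach { case (name, version) =>
    println(s"$name -> $version")
  }
  system.terminate()
}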

View file

@ -1,4 +1,4 @@
# Akka Microbenchmarks
# Apache Pekko Microbenchmarks
This subproject contains some microbenchmarks exercising key parts of Akka. (Excluding typed, which has its
own jmh module)

View file

@ -100,7 +100,7 @@ lazy val root = Project(id = "pekko", base = file("."))
benchJmh,
protobuf,
protobufV3,
akkaScalaNightly,
pekkoScalaNightly,
docs,
serialversionRemoverPlugin))
.settings(
@ -126,7 +126,7 @@ lazy val actorTests = pekkoModule("actor-tests")
.enablePlugins(NoPublish)
.disablePlugins(MimaPlugin)
lazy val akkaScalaNightly = pekkoModule("scala-nightly")
lazy val pekkoScalaNightly = pekkoModule("scala-nightly")
.aggregate(aggregatedProjects: _*)
.disablePlugins(MimaPlugin)
.disablePlugins(ValidatePullRequest, MimaPlugin, CopyrightHeaderInPr)

View file

@ -47,7 +47,7 @@ object ShardedDaemonProcessSettings {
}
/**
* Not for user constructions, use factory methods to instanciate.
* Not for user construction, use factory methods to instantiate.
*/
final class ShardedDaemonProcessSettings @InternalApi private[pekko] (
val keepAliveInterval: FiniteDuration,

View file

@ -30,7 +30,7 @@ import pekko.persistence.typed.ReplicationId
* INTERNAL API
*
* Used when sharding Replicated Event Sourced entities in multiple instances of sharding, for example one per DC in a Multi DC
* Akka Cluster.
* Pekko Cluster.
*
* This actor should be started once on each node where Replicated Event Sourced entities will run (the same nodes that you start
* sharding on). The entities should be set up with [[pekko.persistence.typed.scaladsl.EventSourcedBehavior.withEventPublishing]]

View file

@ -135,7 +135,7 @@ object ClusterSharding {
* i.e. new members in the cluster. This strategy can be replaced by an application specific
* implementation.
*
* The state of shard locations in the `ShardCoordinator` is stored with `akka-distributed-data` or
* The state of shard locations in the `ShardCoordinator` is stored with `pekko-distributed-data` or
* `pekko-persistence` to survive failures. When a crashed or unreachable coordinator
* node has been removed (via down) from the cluster a new `ShardCoordinator` singleton
* actor will take over and the state is recovered. During such a failure period shards

View file

@ -32,7 +32,7 @@ object ShardedDaemonProcess {
* This extension runs a preset number of actors in a cluster.
*
* The typical use case is when you have a task that can be divided among a number of workers, each doing a
* sharded part of the work, for example consuming the read side events from Akka Persistence through
* sharded part of the work, for example consuming the read side events from Pekko Persistence through
* tagged events where each tag decides which consumer should consume the event.
*
* Each named set needs to be started on all the nodes of the cluster on startup.

View file

@ -136,7 +136,7 @@ object ClusterSharding extends ExtensionId[ClusterSharding] {
* must be to begin the rebalancing. This strategy can be replaced by an application specific
* implementation.
*
* The state of shard locations in the `ShardCoordinator` is stored with `akka-distributed-data` or
* The state of shard locations in the `ShardCoordinator` is stored with `pekko-distributed-data` or
* `pekko-persistence` to survive failures. When a crashed or unreachable coordinator
* node has been removed (via down) from the cluster a new `ShardCoordinator` singleton
* actor will take over and the state is recovered. During such a failure period shards

View file

@ -35,7 +35,7 @@ object ShardedDaemonProcess extends ExtensionId[ShardedDaemonProcess] {
* This extension runs a preset number of actors in a cluster.
*
* The typical use case is when you have a task that can be divided among a number of workers, each doing a
* sharded part of the work, for example consuming the read side events from Akka Persistence through
* sharded part of the work, for example consuming the read side events from Pekko Persistence through
* tagged events where each tag decides which consumer should consume the event.
*
* Each named set needs to be started on all the nodes of the cluster on startup.
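For context, initializing such a set of daemon actors from a typed guardian might look like this (a sketch; the processor behavior and tag list are made up):

import org.apache.pekko.actor.typed.Behavior
import org.apache.pekko.actor.typed.scaladsl.Behaviors
import org.apache.pekko.cluster.sharding.typed.scaladsl.ShardedDaemonProcess

object TagProcessor {
  def apply(tag: String): Behavior[String] = Behaviors.ignore // hypothetical worker
}

object Guardian {
  def apply(): Behavior[Unit] =
    Behaviors.setup { context =>
      val tags = Vector("tag-0", "tag-1", "tag-2")
      // One daemon actor per tag, distributed over the cluster and kept alive
      ShardedDaemonProcess(context.system).init(
        "TagProcessors",
        tags.size,
        id => TagProcessor(tags(id)))
      Behaviors.empty
    }
}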

View file

@ -20,7 +20,7 @@ import org.apache.pekko.actor.{ ActorSystem, ExtendedActorSystem }
/**
* This will work on JDK11 and JDK8 built with the enable-jfr flag (8u262+).
*
* For Akka JRF recordings you may need to run a publish for multi jvm tests
* For Pekko JFR recordings you may need to run a publish for multi-jvm tests
* to get the CompileJDK9 things compiled.
*/
class FlightRecording(system: ActorSystem) {

View file

@ -130,7 +130,7 @@ import pekko.util.ccompat.JavaConverters._
* specific implementation.
*
* '''Recovery''':
* The state of shard locations in the `ShardCoordinator` is stored with `akka-distributed-data` or
* The state of shard locations in the `ShardCoordinator` is stored with `pekko-distributed-data` or
* `pekko-persistence` to survive failures. When a crashed or unreachable coordinator
* node has been removed (via down) from the cluster a new `ShardCoordinator` singleton
* actor will take over and the state is recovered. During such a failure period shards

View file

@ -33,12 +33,12 @@ import pekko.persistence.journal.leveldb.SharedLeveldbStore
import scala.annotation.nowarn
/**
* Utility program that removes the internal data stored with Akka Persistence
* Utility program that removes the internal data stored with Pekko Persistence
* by the Cluster `ShardCoordinator`. The data contains the locations of the
* shards using Akka Persistence and it can safely be removed when restarting
* the whole Akka Cluster. Note that this is not application data.
* shards using Pekko Persistence and it can safely be removed when restarting
* the whole Pekko Cluster. Note that this is not application data.
*
* <b>Never use this program while there are running Akka Cluster that is
* <b>Never use this program while there is a running Pekko Cluster that is
* using Cluster Sharding. Stop all Cluster nodes before using this program.</b>
*
* It can be needed to remove the data if the Cluster `ShardCoordinator`

View file

@ -67,7 +67,7 @@ private[pekko] object EventSourcedRememberEntitiesShardStore {
/**
* INTERNAL API
*
* Persistent actor keeping the state for Akka Persistence backed remember entities (enabled through `state-store-mode=persistence`).
* Persistent actor keeping the state for Pekko Persistence backed remember entities (enabled through `state-store-mode=persistence`).
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/

View file

@ -119,7 +119,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
// ClusterJmx is initialized as the last thing in the constructor
private var clusterJmx: Option[ClusterJmx] = None
logInfo("Starting up, Akka version [{}] ...", system.settings.ConfigVersion)
logInfo("Starting up, Pekko version [{}] ...", system.settings.ConfigVersion)
val failureDetector: FailureDetectorRegistry[Address] = {
val createFailureDetector = () =>

View file

@ -52,9 +52,9 @@ import pekko.util.Helpers.toRootLowerCase
* When this happens the guaranteed uniqueness will no longer be true resulting in undesirable behavior
* in the system.
*
* This is even more severe when Akka Persistence is used in conjunction with Cluster Sharding.
* This is even more severe when Pekko Persistence is used in conjunction with Cluster Sharding.
* In this case, the lack of unique actors can cause multiple actors to write to the same journal.
* Akka Persistence operates on a single writer principle. Having multiple writers will corrupt
* Pekko Persistence operates on a single writer principle. Having multiple writers will corrupt
* the journal and make it unusable.
*
* Finally, even if you don't use features such as Persistence, Sharding, or Singletons, auto-downing can lead the

View file

@ -236,7 +236,7 @@ with an application-specific protocol. The benefits of this approach are:
* Improved protocol definition between client and server
* Usage of [Pekko gRPC Service Discovery]($pekko.doc.dns$/docs/pekko-grpc/current/client/configuration.html#using-pekko-discovery-for-endpoint-discovery)
* Clients do not need to use Pekko
* See also [gRPC versus Pekko Remoting]($pekko.doc.dns$/docs/pekko-grpc/current/whygrpc.html#grpc-vs-akka-remoting)
* See also [gRPC versus Pekko Remoting]($pekko.doc.dns$/docs/pekko-grpc/current/whygrpc.html#grpc-vs-pekko-remoting)
### Migrating directly

View file

@ -309,7 +309,7 @@ they were still served on the default dispatcher.
This is the recommended way of dealing with any kind of blocking in reactive
applications.
For a similar discussion specifically about Pekko HTTP, refer to @extref[Handling blocking operations in Pekko HTTP](pekko.http:handling-blocking-operations-in-akka-http-routes.html).
For a similar discussion specifically about Pekko HTTP, refer to @extref[Handling blocking operations in Pekko HTTP](pekko.http:handling-blocking-operations-in-pekko-http-routes.html).
### Available solutions to blocking operations
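One standard remedy is a dedicated dispatcher for blocking calls; a sketch, where the dispatcher name and its configuration are assumptions:

import org.apache.pekko.actor.ActorSystem
import scala.concurrent.{ ExecutionContext, Future }

object BlockingExample extends App {
  // application.conf would define (assumed):
  //   my-blocking-dispatcher {
  //     type = Dispatcher
  //     executor = "thread-pool-executor"
  //     thread-pool-executor.fixed-pool-size = 16
  //   }
  val system = ActorSystem("demo")
  implicit val blockingEc: ExecutionContext =
    system.dispatchers.lookup("my-blocking-dispatcher")

  Future {
    Thread.sleep(5000) // blocking work kept off the default dispatcher
    println("done")
  }
}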

View file

@ -56,7 +56,7 @@ class OsgiActorSystemFactory(
config.withFallback(
ConfigFactory
.load(classloader)
.withFallback(ConfigFactory.defaultReference(OsgiActorSystemFactory.akkaActorClassLoader)))
.withFallback(ConfigFactory.defaultReference(OsgiActorSystemFactory.pekkoActorClassLoader)))
}
/**
@ -73,11 +73,11 @@ object OsgiActorSystemFactory {
/**
* Class loader of pekko-actor bundle.
*/
def akkaActorClassLoader = classOf[ActorSystemActivator].getClassLoader
def pekkoActorClassLoader = classOf[ActorSystemActivator].getClassLoader
/*
* Create an [[OsgiActorSystemFactory]] instance to set up Akka in an OSGi environment
*/
def apply(context: BundleContext, config: Config): OsgiActorSystemFactory =
new OsgiActorSystemFactory(context, Some(akkaActorClassLoader), config)
new OsgiActorSystemFactory(context, Some(pekkoActorClassLoader), config)
}

View file

@ -19,7 +19,7 @@ import pekko.annotation.DoNotInherit
import pekko.annotation.InternalApi
/**
* Supertype for all Akka Persistence Typed specific signals
* Supertype for all Pekko Persistence Typed specific signals
*
* Not for user extension
*/

View file

@ -142,7 +142,7 @@ object EventSourcedBehavior {
* Allows the event sourced behavior to react on signals.
*
* The regular lifecycle signals can be handled as well as
* Akka Persistence specific signals (snapshot and recovery related). Those are all subtypes of
* Pekko Persistence specific signals (snapshot and recovery related). Those are all subtypes of
* [[pekko.persistence.typed.EventSourcedSignal]]
*/
def receiveSignal(signalHandler: PartialFunction[(State, Signal), Unit]): EventSourcedBehavior[Command, Event, State]
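For context, handling such a signal on an `EventSourcedBehavior` (a sketch; the counter model is made up):

import org.apache.pekko.actor.typed.Behavior
import org.apache.pekko.persistence.typed.{ PersistenceId, RecoveryCompleted }
import org.apache.pekko.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }

object Counter {
  sealed trait Command
  case object Increment extends Command

  def apply(id: String): Behavior[Command] =
    EventSourcedBehavior[Command, Int, Int](
      persistenceId = PersistenceId.ofUniqueId(id),
      emptyState = 0,
      commandHandler = (_, _) => Effect.persist(1), // every Increment stores a +1 event
      eventHandler = (state, event) => state + event)
      .receiveSignal {
        // RecoveryCompleted is one of the Pekko Persistence specific signals
        case (state, RecoveryCompleted) =>
          println(s"recovered with state $state")
      }
}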

View file

@ -172,7 +172,7 @@ object SharedLeveldbJournal {
Persistence(system).journalFor(null) ! AsyncWriteProxy.SetStore(store)
/**
* Configuration to enable `TestJavaSerializer` in `akka-testkit` for
* Configuration to enable `TestJavaSerializer` in `pekko-testkit`
* for the messages used by `SharedLeveldbJournal`.
*
* For testing only.

View file

@ -4,7 +4,7 @@
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* This file is part of the Apache Pekko project, which was derived from Akka.
* This file is part of the Apache Pekko project, derived from Akka.
*/
import sbt.Keys._

View file

@ -382,7 +382,7 @@ object Dependencies {
val benchJmh = l ++= Seq(logback, Provided.levelDB, Provided.levelDBNative, Compile.jctools)
// akka stream
// pekko stream
lazy val stream = l ++= Seq[sbt.ModuleID](reactiveStreams, sslConfigCore.value, TestDependencies.scalatest.value)

View file

@ -148,7 +148,7 @@ object UnidocRoot extends AutoPlugin {
.ifTrue(sbtunidoc.ScalaUnidocPlugin && sbtunidoc.JavaUnidocPlugin && sbtunidoc.GenJavadocPlugin)
.getOrElse(sbtunidoc.ScalaUnidocPlugin)
val akkaSettings = UnidocRoot.CliOptions.genjavadocEnabled
val pekkoSettings = UnidocRoot.CliOptions.genjavadocEnabled
.ifTrue(Seq(
JavaUnidoc / unidoc / javacOptions := {
if (JdkOptions.isJdk8) Seq("-Xdoclint:none")

View file

@ -94,7 +94,7 @@ object JdkOptions extends AutoPlugin {
jdk11options(java8home)
case None =>
throw new MessageOnlyException(
"A JDK 8 installation was not found, but is required to build Akka. To manually specify a JDK 8 installation, use the \"set every jdk8home := \\\"/path/to/jdk\\\" sbt command. If you have no JDK 8 installation, target your system JDK with the \"set every targetSystemJdk := true\" sbt command, but beware resulting artifacts will not work on JDK 8")
"A JDK 8 installation was not found, but is required to build Apache Pekko. To manually specify a JDK 8 installation, use the \"set every jdk8home := \\\"/path/to/jdk\\\" sbt command. If you have no JDK 8 installation, target your system JDK with the \"set every targetSystemJdk := true\" sbt command, but beware resulting artifacts will not work on JDK 8")
}
val targetJdkSettings = Seq(targetSystemJdk := false, jdk8home := sys.env.get("JAVA_8_HOME").getOrElse(""))

View file

@ -32,7 +32,7 @@ object MiMa extends AutoPlugin {
override val projectSettings = Seq(
mimaReportSignatureProblems := true,
mimaPreviousArtifacts := akkaPreviousArtifacts(name.value, organization.value, scalaBinaryVersion.value),
mimaPreviousArtifacts := pekkoPreviousArtifacts(name.value, organization.value, scalaBinaryVersion.value),
checkMimaFilterDirectories := checkFilterDirectories(baseDirectory.value))
def checkFilterDirectories(moduleRoot: File): Unit = {
@ -44,7 +44,7 @@ object MiMa extends AutoPlugin {
}
}
def akkaPreviousArtifacts(
def pekkoPreviousArtifacts(
projectName: String,
organization: String,
scalaBinaryVersion: String): Set[sbt.ModuleID] = {

View file

@ -61,14 +61,14 @@ object MultiNode extends AutoPlugin {
// -DMultiJvm.akka.cluster.Stress.nrOfNodes=15
val MultinodeJvmArgs = "multinode\\.(D|X)(.*)".r
val knownPrefix = Set("akka.", "MultiJvm.", "aeron.")
val akkaProperties = System.getProperties.stringPropertyNames.asScala.toList.collect {
val pekkoProperties = System.getProperties.stringPropertyNames.asScala.toList.collect {
case MultinodeJvmArgs(a, b) =>
val value = System.getProperty("multinode." + a + b)
"-" + a + b + (if (value == "") "" else "=" + value)
case key: String if knownPrefix.exists(pre => key.startsWith(pre)) => "-D" + key + "=" + System.getProperty(key)
}
"-Xmx256m" :: akkaProperties ::: CliOptions.sbtLogNoFormat.ifTrue("-Dpekko.test.nocolor=true").toList
"-Xmx256m" :: pekkoProperties ::: CliOptions.sbtLogNoFormat.ifTrue("-Dpekko.test.nocolor=true").toList
} ++ JdkOptions.versionSpecificJavaOptions
private val anyConfigsInThisProject = ScopeFilter(configurations = inAnyConfiguration)

View file

@ -129,13 +129,13 @@ object OSGi {
def defaultImports(scalaVersion: String) =
Seq(
"!sun.misc",
akkaImport(),
pekkoImport(),
configImport(),
"!scala.compat.java8.*",
"!scala.util.parsing.*",
scalaImport(scalaVersion),
"*")
def akkaImport(packageName: String = "org.apache.pekko.*") = versionedImport(packageName, "2.6", "2.7")
def pekkoImport(packageName: String = "org.apache.pekko.*") = versionedImport(packageName, "1.0", "1.1")
def configImport(packageName: String = "com.typesafe.config.*") = versionedImport(packageName, "1.4.0", "1.5.0")
def scalaImport(version: String) = {
val packageName = "scala.*"

View file

@ -43,7 +43,7 @@ object PekkoBuild {
lazy val rootSettings = Def.settings(
commands += switchVersion,
UnidocRoot.akkaSettings,
UnidocRoot.pekkoSettings,
Protobuf.settings,
GlobalScope / parallelExecution := System
.getProperty("pekko.parallelExecution", parallelExecutionByDefault.toString)

View file

@ -33,9 +33,9 @@ object TestExtras {
import Keys._
private[Filter] object Params {
val testNamesExclude = systemPropertyAsSeq("akka.test.names.exclude").toSet
val testTagsExlcude = systemPropertyAsSeq("akka.test.tags.exclude").toSet
val testTagsOnly = systemPropertyAsSeq("akka.test.tags.only").toSet
val testNamesExclude = systemPropertyAsSeq("pekko.test.names.exclude").toSet
val testTagsExlcude = systemPropertyAsSeq("pekko.test.tags.exclude").toSet
val testTagsOnly = systemPropertyAsSeq("pekko.test.tags.only").toSet
}
def settings = {

View file

@ -62,7 +62,7 @@ object PekkoValidatePullRequest extends AutoPlugin {
})(_ || _)
},
validatePullRequestBuildAll / excludeFilter := PathGlobFilter("project/MiMa.scala"),
prValidatorGithubRepository := Some("akka/akka"),
prValidatorGithubRepository := Some("apache/incubator-pekko"),
prValidatorTargetBranch := "origin/main")
override lazy val projectSettings = inConfig(ValidatePR)(Defaults.testTasks) ++ Seq(

View file

@ -9720,7 +9720,7 @@ public final class WireFormats {
/**
* <pre>
**
* Message format of Akka Protocol.
* Message format of Pekko Protocol.
* Message contains either a payload or an instruction.
* </pre>
*
@ -10046,7 +10046,7 @@ public final class WireFormats {
/**
* <pre>
**
* Message format of Akka Protocol.
* Message format of Pekko Protocol.
* Message contains either a payload or an instruction.
* </pre>
*

View file

@ -110,11 +110,11 @@ message DeployData {
/******************************************
* Akka Protocol message formats
* Pekko Protocol message formats
******************************************/
/**
* Message format of Akka Protocol.
* Message format of Pekko Protocol.
* Message contains either a payload or an instruction.
*/
message PekkoProtocolMessage {

View file

@ -1245,6 +1245,6 @@ private[remote] class EndpointReader(
try {
codec.decodeMessage(pdu, provider, localAddress)
} catch {
case NonFatal(e) => throw new EndpointException("Error while decoding incoming Akka PDU", e)
case NonFatal(e) => throw new EndpointException("Error while decoding incoming Pekko PDU", e)
}
}

View file

@ -347,9 +347,10 @@ private[pekko] class RemoteActorRefProvider(
if (!settings.HasCluster) {
if (remoteSettings.UseUnsafeRemoteFeaturesWithoutCluster)
log.info(
"Akka Cluster not in use - enabling unsafe features anyway because `pekko.remote.use-unsafe-remote-features-outside-cluster` has been enabled.")
"Pekko Cluster not in use - enabling unsafe features anyway because `pekko.remote.use-unsafe-remote-features-outside-cluster` has been enabled.")
else
log.warning("Akka Cluster not in use - Using Akka Cluster is recommended if you need remote watch and deploy.")
log.warning(
"Pekko Cluster not in use - Using Pekko Cluster is recommended if you need remote watch and deploy.")
}
protected def warnOnUnsafe(message: String): Unit =
@ -359,7 +360,7 @@ private[pekko] class RemoteActorRefProvider(
/**
* Logs if deathwatch message is intentionally dropped. To disable
* warnings set `pekko.remote.warn-unsafe-watch-outside-cluster` to `off`
* or use Akka Cluster.
* or use Pekko Cluster.
*/
private[pekko] def warnIfUnsafeDeathwatchWithoutCluster(watchee: ActorRef, watcher: ActorRef, action: String): Unit =
warnOnUnsafe(s"Dropped remote $action: disabled for [$watcher -> $watchee]")

View file

@ -27,49 +27,49 @@ import pekko.event.LogMarker
object RemoteLogMarker {
/**
* Marker "akkaFailureDetectorGrowing" of log event when failure detector heartbeat interval
* Marker "pekkoFailureDetectorGrowing" of log event when failure detector heartbeat interval
* is growing too large.
*
* @param remoteAddress The address of the node that the failure detector is monitoring. Included as property "akkaRemoteAddress".
* @param remoteAddress The address of the node that the failure detector is monitoring. Included as property "pekkoRemoteAddress".
*/
def failureDetectorGrowing(remoteAddress: String): LogMarker =
LogMarker("akkaFailureDetectorGrowing", Map(LogMarker.Properties.RemoteAddress -> remoteAddress))
LogMarker("pekkoFailureDetectorGrowing", Map(LogMarker.Properties.RemoteAddress -> remoteAddress))
/**
* Marker "akkaQuarantine" of log event when a node is quarantined.
* Marker "pekkoQuarantine" of log event when a node is quarantined.
*
* @param remoteAddress The address of the node that is quarantined. Included as property "akkaRemoteAddress".
* @param remoteAddressUid The address of the node that is quarantined. Included as property "akkaRemoteAddressUid".
* @param remoteAddress The address of the node that is quarantined. Included as property "pekkoRemoteAddress".
* @param remoteAddressUid The address of the node that is quarantined. Included as property "pekkoRemoteAddressUid".
*/
def quarantine(remoteAddress: Address, remoteAddressUid: Option[Long]): LogMarker =
LogMarker(
"akkaQuarantine",
"pekkoQuarantine",
Map(
LogMarker.Properties.RemoteAddress -> remoteAddress,
LogMarker.Properties.RemoteAddressUid -> remoteAddressUid.getOrElse("")))
/**
* Marker "akkaConnect" of log event when outbound connection is attempted.
* Marker "pekkoConnect" of log event when outbound connection is attempted.
*
* @param remoteAddress The address of the connected node. Included as property "akkaRemoteAddress".
* @param remoteAddressUid The address of the connected node. Included as property "akkaRemoteAddressUid".
* @param remoteAddress The address of the connected node. Included as property "pekkoRemoteAddress".
* @param remoteAddressUid The address of the connected node. Included as property "pekkoRemoteAddressUid".
*/
def connect(remoteAddress: Address, remoteAddressUid: Option[Long]): LogMarker =
LogMarker(
"akkaConnect",
"pekkoConnect",
Map(
LogMarker.Properties.RemoteAddress -> remoteAddress,
LogMarker.Properties.RemoteAddressUid -> remoteAddressUid.getOrElse("")))
/**
* Marker "akkaDisconnected" of log event when outbound connection is closed.
* Marker "pekkoDisconnected" of log event when outbound connection is closed.
*
* @param remoteAddress The address of the disconnected node. Included as property "akkaRemoteAddress".
* @param remoteAddressUid The address of the disconnected node. Included as property "akkaRemoteAddressUid".
* @param remoteAddress The address of the disconnected node. Included as property "pekkoRemoteAddress".
* @param remoteAddressUid The address of the disconnected node. Included as property "pekkoRemoteAddressUid".
*/
def disconnected(remoteAddress: Address, remoteAddressUid: Option[Long]): LogMarker =
LogMarker(
"akkaDisconnected",
"pekkoDisconnected",
Map(
LogMarker.Properties.RemoteAddress -> remoteAddress,
LogMarker.Properties.RemoteAddressUid -> remoteAddressUid.getOrElse("")))

View file

@ -33,13 +33,13 @@ private[remote] class PduCodecException(msg: String, cause: Throwable) extends P
* INTERNAL API
*
* Companion object of the [[pekko.remote.transport.PekkoPduCodec]] trait. Contains the representation case classes
* of decoded Akka Protocol Data Units (PDUs).
* of decoded Pekko Protocol Data Units (PDUs).
*/
@nowarn("msg=deprecated")
private[remote] object PekkoPduCodec {
/**
* Trait that represents decoded Akka PDUs (Protocol Data Units)
* Trait that represents decoded Pekko PDUs (Protocol Data Units)
*/
sealed trait PekkoPdu
final case class Associate(info: HandshakeInfo) extends PekkoPdu
@ -64,7 +64,7 @@ private[remote] object PekkoPduCodec {
/**
* INTERNAL API
*
* A Codec that is able to convert Akka PDUs (Protocol Data Units) from and to [[pekko.util.ByteString]]s.
* A Codec that is able to convert Pekko PDUs (Protocol Data Units) from and to [[pekko.util.ByteString]]s.
*/
@nowarn("msg=deprecated")
private[remote] trait PekkoPduCodec {
@ -75,21 +75,21 @@ private[remote] trait PekkoPduCodec {
* ByteString.
*
* @param raw
* Encoded raw byte representation of an Akka PDU
* Encoded raw byte representation of a Pekko PDU
* @return
* Case class representation of the decoded PDU that can be used in a match statement
*/
def decodePdu(raw: ByteString): PekkoPdu
/**
* Takes an [[pekko.remote.transport.PekkoPduCodec.PekkoPdu]] representation of an Akka PDU and returns its encoded
* Takes an [[pekko.remote.transport.PekkoPduCodec.PekkoPdu]] representation of a Pekko PDU and returns its encoded
* form as a [[pekko.util.ByteString]].
*
* For the same effect the constructXXX methods might be called directly, taking method parameters instead of the
* [[pekko.remote.transport.PekkoPduCodec.PekkoPdu]] final case classes.
*
* @param pdu
* The Akka Protocol Data Unit to be encoded
* The Pekko Protocol Data Unit to be encoded
* @return
* Encoded form as raw bytes
*/
@ -203,7 +203,8 @@ private[remote] object PekkoPduProtobufCodec$ extends PekkoPduCodec {
if (pdu.hasPayload) Payload(ByteString.fromByteBuffer(pdu.getPayload.asReadOnlyByteBuffer()))
else if (pdu.hasInstruction) decodeControlPdu(pdu.getInstruction)
else
throw new PduCodecException("Error decoding Akka PDU: Neither message nor control message were contained", null)
throw new PduCodecException("Error decoding Pekko PDU: Neither message nor control message were contained",
null)
} catch {
case e: InvalidProtocolBufferException => throw new PduCodecException("Decoding PDU failed.", e)
}

View file

@ -98,7 +98,7 @@ final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String]
* Features provided by this transport are:
* - Soft-state associations via the use of heartbeats and failure detectors
* - Transparent origin address handling
* - pluggable codecs to encode and decode Akka PDUs
* - pluggable codecs to encode and decode Pekko PDUs
*
* It is not possible to load this transport dynamically using the configuration of remoting, because it does not
* expose a constructor with [[com.typesafe.config.Config]] and [[pekko.actor.ExtendedActorSystem]] parameters.
@ -112,7 +112,7 @@ final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String]
* @param settings
* the configuration options of the Akka protocol
* @param codec
* the codec that will be used to encode/decode Akka PDUs
* the codec that will be used to encode/decode Pekko PDUs
*/
@nowarn("msg=deprecated")
private[remote] class PekkoProtocolTransport(
@ -157,7 +157,7 @@ private[transport] class PekkoProtocolManager(
}
private def actorNameFor(remoteAddress: Address): String =
"akkaProtocol-" + AddressUrlEncoder(remoteAddress) + "-" + nextId()
"pekkoProtocol-" + AddressUrlEncoder(remoteAddress) + "-" + nextId()
override def ready: Receive = {
case InboundAssociation(handle) =>
@ -741,7 +741,7 @@ private[remote] class ProtocolStateActor(
try codec.decodePdu(pdu)
catch {
case NonFatal(e) =>
throw new PekkoProtocolException("Error while decoding incoming Akka PDU of length: " + pdu.length, e)
throw new PekkoProtocolException("Error while decoding incoming Pekko PDU of length: " + pdu.length, e)
}
// Neither heartbeats nor disassociates care about backing off if the write fails:

View file

@ -387,7 +387,7 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA
* outbound connections are initiated in the shutdown phase.
*/
val channelGroup = new DefaultChannelGroup(
"akka-netty-transport-driver-channelgroup-" +
"pekko-netty-transport-driver-channelgroup-" +
uniqueIdCounter.getAndIncrement)
private val clientChannelFactory: ChannelFactory = {

View file

@ -103,18 +103,18 @@ class TestTransportSpec extends PekkoSpec with DefaultTimeout with ImplicitSende
// Initialize handles
handleA.readHandlerPromise.success(ActorHandleEventListener(self))
val akkaPDU = ByteString("AkkaPDU")
val pekkoPDU = ByteString("PekkoPDU")
awaitCond(registry.existsAssociation(addressA, addressB))
handleA.write(akkaPDU)
handleA.write(pekkoPDU)
expectMsgPF(timeout.duration, "Expect InboundPayload from A") {
case InboundPayload(payload) if payload == akkaPDU =>
case InboundPayload(payload) if payload == pekkoPDU =>
}
registry.logSnapshot.exists {
case WriteAttempt(sender, recipient, payload) =>
sender == addressA && recipient == addressB && payload == akkaPDU
sender == addressA && recipient == addressB && payload == pekkoPDU
case _ => false
} should ===(true)
}

View file

@ -24,7 +24,7 @@ import pekko.stream.scaladsl.Source
import pekko.util.ByteString
import pekko.util.ccompat._
// a few useful helpers copied over from akka-http
// a few useful helpers copied over from pekko-http
@ccompatUsedUntil213
object CompressionTestingTools {
implicit class AddFutureAwaitResult[T](val future: Future[T]) extends AnyVal {

View file

@ -64,12 +64,12 @@ private[pekko] trait MetricsKit extends MetricsKitOps {
def configureConsoleReporter(): Unit = {
if (settings.Reporters.contains("console")) {
val akkaConsoleReporter = new PekkoConsoleReporter(registry, settings.ConsoleReporter.Verbose)
val pekkoConsoleReporter = new PekkoConsoleReporter(registry, settings.ConsoleReporter.Verbose)
if (settings.ConsoleReporter.ScheduledReportInterval > Duration.Zero)
akkaConsoleReporter.start(settings.ConsoleReporter.ScheduledReportInterval.toMillis, TimeUnit.MILLISECONDS)
pekkoConsoleReporter.start(settings.ConsoleReporter.ScheduledReportInterval.toMillis, TimeUnit.MILLISECONDS)
reporters ::= akkaConsoleReporter
reporters ::= pekkoConsoleReporter
}
}

View file

@ -28,7 +28,7 @@ import pekko.testkit.metrics._
class PekkoConsoleReporter(registry: PekkoMetricRegistry, verbose: Boolean, output: PrintStream = System.out)
extends ScheduledReporter(
registry.asInstanceOf[MetricRegistry],
"akka-console-reporter",
"pekko-console-reporter",
MetricFilter.ALL,
TimeUnit.SECONDS,
TimeUnit.NANOSECONDS) {