diff --git a/.github/workflows/build-test-prValidation.yml b/.github/workflows/build-test-prValidation.yml index ab06a510dc..ffa08b5349 100644 --- a/.github/workflows/build-test-prValidation.yml +++ b/.github/workflows/build-test-prValidation.yml @@ -55,14 +55,14 @@ jobs: - name: sbt validatePullRequest run: |- sbtx -jvm-opts .jvmopts-ci \ - -Dakka.mima.enabled=false \ - -Dakka.test.multi-in-test=false \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.tags.exclude=gh-exclude,timing \ - -Dakka.cluster.assert=on \ + -Dpekko.mima.enabled=false \ + -Dpekko.test.multi-in-test=false \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.exclude=gh-exclude,timing \ + -Dpekko.cluster.assert=on \ -Dsbt.override.build.repos=false \ - -Dakka.test.multi-node=false \ + -Dpekko.test.multi-node=false \ -Dsbt.log.noformat=false \ - -Dakka.log.timestamps=true \ + -Dpekko.log.timestamps=true \ validateCompile validatePullRequest diff --git a/.github/workflows/link-validator.yml b/.github/workflows/link-validator.yml index 6542040587..243b3253c9 100644 --- a/.github/workflows/link-validator.yml +++ b/.github/workflows/link-validator.yml @@ -28,7 +28,7 @@ jobs: uses: coursier/cache-action@v6.4.0 - name: create the Akka site - run: sbt -Dakka.genjavadoc.enabled=true "Javaunidoc/doc; Compile/unidoc; docs/paradox" + run: sbt -Dpekko.genjavadoc.enabled=true "Javaunidoc/doc; Compile/unidoc; docs/paradox" - name: Install Coursier command line tool run: curl -fLo cs https://git.io/coursier-cli-linux && chmod +x cs && ./cs diff --git a/.github/workflows/multi-node.yml b/.github/workflows/multi-node.yml index cb41a503fc..fae6843bf0 100644 --- a/.github/workflows/multi-node.yml +++ b/.github/workflows/multi-node.yml @@ -52,14 +52,14 @@ jobs: run: | cat multi-node-test.hosts sbtx -jvm-opts .jvmopts-ci \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - 
-Dakka.test.tags.exclude=gh-exclude,timing \ - -Dakka.cluster.assert=on \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.exclude=gh-exclude,timing \ + -Dpekko.cluster.assert=on \ -Dsbt.override.build.repos=false \ - -Dakka.test.multi-node=true \ - -Dakka.test.multi-node.targetDirName=${PWD}/target/${{ github.run_id }} \ - -Dakka.test.multi-node.java=${JAVA_HOME}/bin/java \ + -Dpekko.test.multi-node=true \ + -Dpekko.test.multi-node.targetDirName=${PWD}/target/${{ github.run_id }} \ + -Dpekko.test.multi-node.java=${JAVA_HOME}/bin/java \ -Dmultinode.XX:MetaspaceSize=128M \ -Dmultinode.Xms512M \ -Dmultinode.Xmx512M \ @@ -136,15 +136,15 @@ jobs: run: | cat multi-node-test.hosts sbtx -jvm-opts .jvmopts-ci \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.cluster.assert=on \ - -Dakka.remote.artery.transport=aeron-udp \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.cluster.assert=on \ + -Dpekko.remote.artery.transport=aeron-udp \ -Dsbt.override.build.repos=false \ - -Dakka.test.tags.exclude=gh-exclude,gh-exclude-aeron,timing \ - -Dakka.test.multi-node=true \ - -Dakka.test.multi-node.targetDirName=${PWD}/target/${{ github.run_id }} \ - -Dakka.test.multi-node.java=${JAVA_HOME}/bin/java \ + -Dpekko.test.tags.exclude=gh-exclude,gh-exclude-aeron,timing \ + -Dpekko.test.multi-node=true \ + -Dpekko.test.multi-node.targetDirName=${PWD}/target/${{ github.run_id }} \ + -Dpekko.test.multi-node.java=${JAVA_HOME}/bin/java \ -Dmultinode.XX:MetaspaceSize=128M \ -Dmultinode.Xms512M \ -Dmultinode.Xmx512M \ diff --git a/.github/workflows/nightly-builds.yml b/.github/workflows/nightly-builds.yml index a801b71064..67c57fc264 100644 --- a/.github/workflows/nightly-builds.yml +++ b/.github/workflows/nightly-builds.yml @@ -30,12 +30,12 @@ jobs: run: |- sbtx -jvm-opts .jvmopts-ci \ -Djava.security.egd=file:/dev/./urandom \ - -Dakka.test.sigar=true \ - -Dakka.cluster.assert=on \ 
- -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.tags.exclude=gh-exclude,timing \ - -Dakka.log.timestamps=true \ + -Dpekko.test.sigar=true \ + -Dpekko.cluster.assert=on \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.exclude=gh-exclude,timing \ + -Dpekko.log.timestamps=true \ -Dmultinode.XX:MetaspaceSize=128M \ -Dmultinode.Xms256M \ -Dmultinode.Xmx256M \ @@ -100,12 +100,12 @@ jobs: run: |- sbtx -jvm-opts .jvmopts-ci \ -Djava.security.egd=file:/dev/./urandom \ - -Dakka.remote.artery.enabled=off \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.tags.exclude=gh-exclude,timing \ - -Dakka.test.multi-in-test=false \ - -Dakka.cluster.assert=on \ + -Dpekko.remote.artery.enabled=off \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.exclude=gh-exclude,timing \ + -Dpekko.test.multi-in-test=false \ + -Dpekko.cluster.assert=on \ clean ${{ matrix.command }} # comment out email actions until we have an email address to use (and we need to get INFRA to whitelist dawidd6/action-send-mail) @@ -161,12 +161,12 @@ jobs: # note that this is not running any multi-jvm tests because multi-in-test=false run: |- sbtx -jvm-opts .jvmopts-ci \ - -Dakka.cluster.assert=on \ - -Dakka.log.timestamps=true \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.tags.exclude=gh-exclude,timing \ - -Dakka.test.multi-in-test=false \ + -Dpekko.cluster.assert=on \ + -Dpekko.log.timestamps=true \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.exclude=gh-exclude,timing \ + -Dpekko.test.multi-in-test=false \ ${{ matrix.extraOpts }} \ clean "+~ ${{ matrix.scalaVersion }} test" checkTestsHaveRun @@ -195,7 +195,7 @@ jobs: run: |- sudo apt-get install graphviz sbtx -jvm-opts .jvmopts-ci \ - -Dakka.genjavadoc.enabled=true \ + -Dpekko.genjavadoc.enabled=true \ "+~ 
${{ matrix.scalaVersion }} doc" - name: Publish @@ -204,7 +204,7 @@ jobs: run: |- sudo apt-get install graphviz sbtx -jvm-opts .jvmopts-ci \ - -Dakka.build.scalaVersion=${{ matrix.scalaVersion }} \ + -Dpekko.build.scalaVersion=${{ matrix.scalaVersion }} \ "+~ ${{ matrix.scalaVersion }} publishLocal publishM2" # comment out email actions until we have an email address to use (and we need to get INFRA to whitelist dawidd6/action-send-mail) @@ -253,12 +253,12 @@ jobs: run: |- sbtx -jvm-opts .jvmopts-ci \ -Djava.security.egd=file:/dev/./urandom \ - -Dakka.remote.artery.transport=aeron-udp \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.tags.exclude=gh-exclude,gh-exclude-aeron,timing \ - -Dakka.test.multi-in-test=false \ - -Dakka.cluster.assert=on \ + -Dpekko.remote.artery.transport=aeron-udp \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.exclude=gh-exclude,gh-exclude-aeron,timing \ + -Dpekko.test.multi-in-test=false \ + -Dpekko.cluster.assert=on \ -Daeron.dir=/opt/volumes/media-driver \ -Daeron.term.buffer.length=33554432 \ clean ${{ matrix.command }} diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 3840b4ff8f..be9304fc75 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -33,7 +33,7 @@ jobs: chmod 600 /tmp/id_rsa ssh-add /tmp/id_rsa # using Scala 2.13 here to avoid the infamous problem with missing AskSupport in classpath - sbt -Dakka.genjavadoc.enabled=true "+~ 2.13 publishRsync" + sbt -Dpekko.genjavadoc.enabled=true "+~ 2.13 publishRsync" env: SCP_SECRET: ${{ secrets.SCP_SECRET }} diff --git a/.github/workflows/scala3-build.yml b/.github/workflows/scala3-build.yml index 2930b65d8d..2fbaba35fd 100644 --- a/.github/workflows/scala3-build.yml +++ b/.github/workflows/scala3-build.yml @@ -49,11 +49,11 @@ jobs: # note that this is not running any multi-jvm tests (yet) because multi-in-test=false run: 
| sbtx -jvm-opts .jvmopts-ci \ - -Dakka.log.timestamps=true \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.multi-in-test=false \ - -Dakka.test.tags.exclude=gh-exclude,timing \ + -Dpekko.log.timestamps=true \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.multi-in-test=false \ + -Dpekko.test.tags.exclude=gh-exclude,timing \ -Dmultinode.XX:MetaspaceSize=128M \ -Dmultinode.Xms256M \ -Dmultinode.Xmx256M \ diff --git a/.github/workflows/timing-tests.yml b/.github/workflows/timing-tests.yml index e3fc3d4e9b..05b2b66824 100644 --- a/.github/workflows/timing-tests.yml +++ b/.github/workflows/timing-tests.yml @@ -30,12 +30,12 @@ jobs: run: |- sbtx -jvm-opts .jvmopts-ci \ -Djava.security.egd=file:/dev/./urandom \ - -Dakka.cluster.assert=on \ - -Dakka.test.timefactor=2 \ - -Dakka.actor.testkit.typed.timefactor=2 \ - -Dakka.test.tags.only=timing \ - -Dakka.log.timestamps=true \ - -Dakka.test.multi-in-test=false \ + -Dpekko.cluster.assert=on \ + -Dpekko.test.timefactor=2 \ + -Dpekko.actor.testkit.typed.timefactor=2 \ + -Dpekko.test.tags.only=timing \ + -Dpekko.log.timestamps=true \ + -Dpekko.test.multi-in-test=false \ -Dmultinode.XX:MetaspaceSize=128M \ -Dmultinode.Xms256M \ -Dmultinode.Xmx256M \ diff --git a/.jvmopts-ci b/.jvmopts-ci index 8d097d6f45..150c790eb9 100644 --- a/.jvmopts-ci +++ b/.jvmopts-ci @@ -12,4 +12,4 @@ -XX:MetaspaceSize=512M -XX:-ClassUnloadingWithConcurrentMark -Djava.security.egd=file:/dev/./urandom --Dakka.ci-server=true +-Dpekko.ci-server=true diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2b6b81a0b9..972f11d744 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -347,7 +347,7 @@ For further hints on how to disambiguate links in ScalaDoc comments see [this StackOverflow answer](https://stackoverflow.com/a/31569861/354132), though note that this syntax may not correctly render as Javadoc. 
-The Scaladoc tool needs the `dot` command from the [Graphviz](https://graphviz.org/#download) software package to be installed to avoid errors. You can disable the diagram generation by adding the flag `-Dakka.scaladoc.diagrams=false`. After installing Graphviz, make sure you add the toolset to the `PATH` (definitely on Windows). +The Scaladoc tool needs the `dot` command from the [Graphviz](https://graphviz.org/#download) software package to be installed to avoid errors. You can disable the diagram generation by adding the flag `-Dpekko.scaladoc.diagrams=false`. After installing Graphviz, make sure you add the toolset to the `PATH` (definitely on Windows). #### JavaDoc @@ -357,7 +357,7 @@ Generating JavaDoc is not enabled by default, as it's not needed on day-to-day d If you'd like to check if your links and formatting look good in JavaDoc (and not only in ScalaDoc), you can generate it by running: ```shell -sbt -Dakka.genjavadoc.enabled=true Javaunidoc/doc +sbt -Dpekko.genjavadoc.enabled=true Javaunidoc/doc ``` Which will generate JavaDoc style docs in `./target/javaunidoc/index.html`. This requires a JDK version 11 or later. @@ -431,13 +431,13 @@ Also, tests tagged as `PerformanceTest`, `TimingTest`, `LongRunningTest`, and al You can exclude the same kind of tests in your local build by starting sbt with: ```shell -sbt -Dakka.test.tags.exclude=performance,timing,long-running -Dakka.test.multi-in-test=false +sbt -Dpekko.test.tags.exclude=performance,timing,long-running -Dpekko.test.multi-in-test=false ``` It is also possible to exclude groups of test by their names. For example: ```shell -sbt -Dakka.test.names.exclude=akka.cluster.Stress +sbt -Dpekko.test.names.exclude=pekko.cluster.Stress ``` Will exclude any tests that have names containing `akka.cluster.Stress`. 
@@ -476,7 +476,7 @@ In addition to formatting, the Pekko build enforces code discipline through a se to any non-empty string value when starting up sbt: ```shell -sbt -Dakka.no.discipline=youbet +sbt -Dpekko.no.discipline=youbet ``` PR validation includes the discipline flags and hence may fail if the flags were disabled during development. Make sure you compile your code at least once with discipline enabled before sending a PR. diff --git a/akka-actor-testkit-typed/src/main/resources/reference.conf b/akka-actor-testkit-typed/src/main/resources/reference.conf index fe341c02b8..1da447d66e 100644 --- a/akka-actor-testkit-typed/src/main/resources/reference.conf +++ b/akka-actor-testkit-typed/src/main/resources/reference.conf @@ -1,11 +1,11 @@ ################################################## -# Akka Actor Typed Testkit Reference Config File # +# Pekko Actor Typed Testkit Reference Config File # ################################################## # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -akka.actor.testkit.typed { +pekko.actor.testkit.typed { # Factor by which to scale timeouts during tests, e.g. to account for shared # build system load. timefactor = 1.0 diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/TestKitSettings.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/TestKitSettings.scala index b4e15d8322..ea277de143 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/TestKitSettings.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/TestKitSettings.scala @@ -18,27 +18,27 @@ import pekko.util.Timeout object TestKitSettings { /** - * Reads configuration settings from `akka.actor.testkit.typed` section. + * Reads configuration settings from `pekko.actor.testkit.typed` section. 
*/ def apply(system: ActorSystem[_]): TestKitSettings = Ext(system).settings /** * Reads configuration settings from given `Config` that - * must have the same layout as the `akka.actor.testkit.typed` section. + * must have the same layout as the `pekko.actor.testkit.typed` section. */ def apply(config: Config): TestKitSettings = new TestKitSettings(config) /** - * Java API: Reads configuration settings from `akka.actor.testkit.typed` section. + * Java API: Reads configuration settings from `pekko.actor.testkit.typed` section. */ def create(system: ActorSystem[_]): TestKitSettings = apply(system) /** * Reads configuration settings from given `Config` that - * must have the same layout as the `akka.actor.testkit.typed` section. + * must have the same layout as the `pekko.actor.testkit.typed` section. */ def create(config: Config): TestKitSettings = new TestKitSettings(config) @@ -49,7 +49,7 @@ object TestKitSettings { } private class Ext(system: ActorSystem[_]) extends Extension { - val settings: TestKitSettings = TestKitSettings(system.settings.config.getConfig("akka.actor.testkit.typed")) + val settings: TestKitSettings = TestKitSettings(system.settings.config.getConfig("pekko.actor.testkit.typed")) } } diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/LoggingTestKit.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/LoggingTestKit.scala index 8ee1ed1dcf..43da82a708 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/LoggingTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/LoggingTestKit.scala @@ -32,7 +32,7 @@ import pekko.annotation.DoNotInherit * outside (after) the `intercept` thunk and it has already found expected number. 
* * When occurrences is 0 it will look for unexpected matching events, and then it will - * also look for excess messages during the configured `akka.actor.testkit.typed.expect-no-message-default` + * also look for excess messages during the configured `pekko.actor.testkit.typed.expect-no-message-default` * duration. */ def withOccurrences(newOccurrences: Int): LoggingTestKit @@ -95,7 +95,7 @@ import pekko.annotation.DoNotInherit /** * Run the given code block and assert that the criteria of this `LoggingTestKit` has - * matched within the configured `akka.actor.testkit.typed.filter-leeway` + * matched within the configured `pekko.actor.testkit.typed.filter-leeway` * as often as requested by its `occurrences` parameter specifies. * * Care is taken to remove the testkit when the block is finished or aborted. diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala index 51fa2f035d..3e936a217b 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala @@ -85,7 +85,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Obtain time remaining for execution of the innermost enclosing `within` * block or missing that it returns the properly dilated default for this - * case from settings (key "akka.actor.testkit.typed.single-expect-default"). + * case from settings (key "pekko.actor.testkit.typed.single-expect-default"). */ def getRemainingOrDefault: Duration @@ -108,7 +108,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef * take maximum wait times are available in a version which implicitly uses * the remaining time governed by the innermost enclosing `within` block. 
* - * Note that the max timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor", + * Note that the max timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor", * while the min Duration is not. * * {{{ @@ -156,8 +156,8 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef def expectNoMessage(max: Duration): Unit /** - * Assert that no message is received. Waits for the default period configured as `akka.actor.testkit.typed.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Assert that no message is received. Waits for the default period configured as `pekko.actor.testkit.typed.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def expectNoMessage(): Unit @@ -170,7 +170,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Wait for a message of type M and return it when it arrives, or fail if the `max` timeout is hit. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def expectMessageClass[T <: M](clazz: Class[T], max: Duration): T @@ -193,7 +193,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Receive `n` messages in a row before the given deadline. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def receiveSeveralMessages(n: Int, max: Duration): JList[M] @@ -211,7 +211,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef * partial function). 
* * @param max Max total time without the fisher function returning `CompleteFishing` before failing. - * The timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * The timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". * @return The messages accepted in the order they arrived */ def fishForMessage(max: Duration, fisher: java.util.function.Function[M, FishingOutcome]): java.util.List[M] @@ -228,7 +228,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef * Expect the given actor to be stopped or stop within the given timeout or * throw an [[AssertionError]]. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def expectTerminated[U](actorRef: ActorRef[U], max: Duration): Unit @@ -243,7 +243,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef * * If the `max` timeout expires the last exception is thrown. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def awaitAssert[A](max: Duration, interval: Duration, creator: Creator[A]): A @@ -253,7 +253,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef * * If the `max` timeout expires the last exception is thrown. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". 
*/ def awaitAssert[A](max: Duration, creator: Creator[A]): A diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/LoggingTestKit.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/LoggingTestKit.scala index 250b5d5efe..3709254d56 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/LoggingTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/LoggingTestKit.scala @@ -32,7 +32,7 @@ import pekko.annotation.DoNotInherit * outside (after) the `expect` thunk and it has already found expected number. * * When occurrences is 0 it will look for unexpected matching events, and then it will - * also look for excess messages during the configured `akka.actor.testkit.typed.expect-no-message-default` + * also look for excess messages during the configured `pekko.actor.testkit.typed.expect-no-message-default` * duration. */ def withOccurrences(newOccurrences: Int): LoggingTestKit @@ -94,7 +94,7 @@ import pekko.annotation.DoNotInherit /** * Run the given code block and assert that the criteria of this `LoggingTestKit` has - * matched within the configured `akka.actor.testkit.typed.filter-leeway` + * matched within the configured `pekko.actor.testkit.typed.filter-leeway` * as often as requested by its `occurrences` parameter specifies. * * Care is taken to remove the testkit when the block is finished or aborted. @@ -103,7 +103,7 @@ import pekko.annotation.DoNotInherit /** * Run the given code block and assert that the criteria of this `LoggingTestKit` has - * matched within the configured `akka.actor.testkit.typed.filter-leeway` + * matched within the configured `pekko.actor.testkit.typed.filter-leeway` * as often as requested by its `occurrences` parameter specifies. * * Care is taken to remove the testkit when the block is finished or aborted. 
diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/ManualTime.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/ManualTime.scala index 144c771658..baaac6b41d 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/ManualTime.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/ManualTime.scala @@ -26,7 +26,7 @@ object ManualTime { */ val config: Config = ConfigFactory.parseString( - """akka.scheduler.implementation = "org.apache.pekko.testkit.ExplicitlyTriggeredScheduler"""") + """pekko.scheduler.implementation = "org.apache.pekko.testkit.ExplicitlyTriggeredScheduler"""") /** * Access the manual scheduler, note that you need to setup the actor system/testkit with [[ManualTime.config]] diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala index 5a63cb1a49..f577f3b3f8 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala @@ -71,7 +71,7 @@ object TestProbe { /** * Obtain time remaining for execution of the innermost enclosing `within` * block or missing that it returns the properly dilated default for this - * case from settings (key "akka.actor.testkit.typed.single-expect-default"). + * case from settings (key "pekko.actor.testkit.typed.single-expect-default"). */ def remainingOrDefault: FiniteDuration @@ -94,7 +94,7 @@ object TestProbe { * take maximum wait times are available in a version which implicitly uses * the remaining time governed by the innermost enclosing `within` block. 
* - * Note that the max timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor", + * Note that the max timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor", * while the min Duration is not. * * {{{ @@ -141,8 +141,8 @@ object TestProbe { def expectNoMessage(max: FiniteDuration): Unit /** - * Assert that no message is received. Waits for the default period configured as `akka.actor.testkit.typed.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Assert that no message is received. Waits for the default period configured as `pekko.actor.testkit.typed.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def expectNoMessage(): Unit @@ -175,7 +175,7 @@ object TestProbe { /** * Receive `n` messages in a row before the given deadline. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def receiveMessages(n: Int, max: FiniteDuration): immutable.Seq[M] @@ -194,7 +194,7 @@ object TestProbe { * partial function). * * @param max Max total time without the fisher function returning `CompleteFishing` before failing. - * The timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * The timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". * @return The messages accepted in the order they arrived */ def fishForMessage(max: FiniteDuration, hint: String)(fisher: M => FishingOutcome): immutable.Seq[M] @@ -231,7 +231,7 @@ object TestProbe { * * If the `max` timeout expires the last exception is thrown. * - * Note that the timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". 
+ * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def awaitAssert[A](a: => A, max: FiniteDuration, interval: FiniteDuration): A diff --git a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/package.scala b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/package.scala index c0558a5e89..42c6648a18 100644 --- a/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/package.scala +++ b/akka-actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/package.scala @@ -10,7 +10,7 @@ package object scaladsl { /** * Scala API. Scale timeouts (durations) during tests with the configured - * 'akka.actor.testkit.typed.timefactor'. + * 'pekko.actor.testkit.typed.timefactor'. * Implicit class providing `dilated` method. * * {{{ diff --git a/akka-actor-testkit-typed/src/test/java/jdocs/org/apache/pekko/actor/testkit/typed/javadsl/TestConfigExample.java b/akka-actor-testkit-typed/src/test/java/jdocs/org/apache/pekko/actor/testkit/typed/javadsl/TestConfigExample.java index 6c102ef2e1..3e19f479dc 100644 --- a/akka-actor-testkit-typed/src/test/java/jdocs/org/apache/pekko/actor/testkit/typed/javadsl/TestConfigExample.java +++ b/akka-actor-testkit-typed/src/test/java/jdocs/org/apache/pekko/actor/testkit/typed/javadsl/TestConfigExample.java @@ -19,12 +19,12 @@ public class TestConfigExample { ; // #parse-string - ConfigFactory.parseString("akka.loglevel = DEBUG \n" + "akka.log-config-on-start = on \n") + ConfigFactory.parseString("pekko.loglevel = DEBUG \n" + "pekko.log-config-on-start = on \n") // #parse-string ; // #fallback-application-conf - ConfigFactory.parseString("akka.loglevel = DEBUG \n" + "akka.log-config-on-start = on \n") + ConfigFactory.parseString("pekko.loglevel = DEBUG \n" + "pekko.log-config-on-start = on \n") .withFallback(ConfigFactory.load()) // #fallback-application-conf ; diff 
--git a/akka-actor-testkit-typed/src/test/scala/docs/org/apache/pekko/actor/testkit/typed/scaladsl/TestConfigExample.scala b/akka-actor-testkit-typed/src/test/scala/docs/org/apache/pekko/actor/testkit/typed/scaladsl/TestConfigExample.scala index cb66e0ecf7..07b53195af 100644 --- a/akka-actor-testkit-typed/src/test/scala/docs/org/apache/pekko/actor/testkit/typed/scaladsl/TestConfigExample.scala +++ b/akka-actor-testkit-typed/src/test/scala/docs/org/apache/pekko/actor/testkit/typed/scaladsl/TestConfigExample.scala @@ -16,15 +16,15 @@ object TestConfigExample { // #parse-string ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.log-config-on-start = on + pekko.loglevel = DEBUG + pekko.log-config-on-start = on """) // #parse-string // #fallback-application-conf ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.log-config-on-start = on + pekko.loglevel = DEBUG + pekko.log-config-on-start = on """).withFallback(ConfigFactory.load()) // #fallback-application-conf } diff --git a/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestAppenderSpec.scala b/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestAppenderSpec.scala index 64d0dfbd20..617aff8900 100644 --- a/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestAppenderSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestAppenderSpec.scala @@ -17,7 +17,7 @@ class TestAppenderSpec extends ScalaTestWithActorTestKit( """ # increase to avoid spurious failures in "find unexpected async events withOccurrences(0)" - akka.actor.testkit.typed.expect-no-message-default = 1000 ms + pekko.actor.testkit.typed.expect-no-message-default = 1000 ms """) with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbeSpec.scala 
b/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbeSpec.scala index a052744d60..344529aa15 100644 --- a/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbeSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbeSpec.scala @@ -171,8 +171,8 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with object TestProbeSpec { val timeoutConfig = ConfigFactory.parseString(""" - akka.actor.testkit.typed.default-timeout = 100ms - akka.test.default-timeout = 100ms""") + pekko.actor.testkit.typed.default-timeout = 100ms + pekko.test.default-timeout = 100ms""") /** Helper events for tests. */ final case class EventT(id: Long) diff --git a/akka-actor-tests/src/test/java/org/apache/pekko/actor/JavaExtension.java b/akka-actor-tests/src/test/java/org/apache/pekko/actor/JavaExtension.java index 73630e2871..94a04a5017 100644 --- a/akka-actor-tests/src/test/java/org/apache/pekko/actor/JavaExtension.java +++ b/akka-actor-tests/src/test/java/org/apache/pekko/actor/JavaExtension.java @@ -66,7 +66,7 @@ public class JavaExtension extends JUnitSuite { new AkkaJUnitActorSystemResource( "JavaExtension", ConfigFactory.parseString( - "akka.extensions = [ \"org.apache.pekko.actor.JavaExtension$TestExtensionId\" ]") + "pekko.extensions = [ \"org.apache.pekko.actor.JavaExtension$TestExtensionId\" ]") .withFallback(AkkaSpec.testConf())); private final ActorSystem system = actorSystemResource.getSystem(); diff --git a/akka-actor-tests/src/test/java/org/apache/pekko/actor/StashJavaAPI.java b/akka-actor-tests/src/test/java/org/apache/pekko/actor/StashJavaAPI.java index b01eca3881..93465b82ab 100644 --- a/akka-actor-tests/src/test/java/org/apache/pekko/actor/StashJavaAPI.java +++ b/akka-actor-tests/src/test/java/org/apache/pekko/actor/StashJavaAPI.java @@ -42,6 +42,6 @@ public class StashJavaAPI extends JUnitSuite { public void 
mustBeAbleToUseUnrestrictedStash() { testAStashApi( Props.create(StashJavaAPITestActors.WithUnrestrictedStash.class) - .withMailbox("akka.actor.mailbox.unbounded-deque-based")); + .withMailbox("pekko.actor.mailbox.unbounded-deque-based")); } } diff --git a/akka-actor-tests/src/test/java/org/apache/pekko/event/LoggingAdapterTest.java b/akka-actor-tests/src/test/java/org/apache/pekko/event/LoggingAdapterTest.java index 314e8761b3..51a9897f88 100644 --- a/akka-actor-tests/src/test/java/org/apache/pekko/event/LoggingAdapterTest.java +++ b/akka-actor-tests/src/test/java/org/apache/pekko/event/LoggingAdapterTest.java @@ -30,7 +30,7 @@ import static org.junit.Assert.assertTrue; public class LoggingAdapterTest extends JUnitSuite { - private static final Config config = ConfigFactory.parseString("akka.loglevel = DEBUG\n"); + private static final Config config = ConfigFactory.parseString("pekko.loglevel = DEBUG\n"); @Rule public AkkaJUnitActorSystemResource actorSystemResource = diff --git a/akka-actor-tests/src/test/resources/reference.conf b/akka-actor-tests/src/test/resources/reference.conf index c04e3f6740..211720948f 100644 --- a/akka-actor-tests/src/test/resources/reference.conf +++ b/akka-actor-tests/src/test/resources/reference.conf @@ -1,8 +1,8 @@ -akka { +pekko { # for the org.apache.pekko.actor.ExtensionSpec library-extensions += "org.apache.pekko.actor.InstanceCountingExtension" } # FIXME Some test depend on this setting when running on windows. # It should be removed when #17122 is solved. 
-akka.io.tcp.windows-connection-abort-workaround-enabled = auto +pekko.io.tcp.windows-connection-abort-workaround-enabled = auto diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorMailboxSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorMailboxSpec.scala index 2a6f90fa09..705a85fe98 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorMailboxSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorMailboxSpec.scala @@ -84,7 +84,7 @@ object ActorMailboxSpec { mailbox-type = "org.apache.pekko.actor.ActorMailboxSpec$$MCBoundedMailbox" } - akka.actor.deployment { + pekko.actor.deployment { /default-default { } /default-override-from-props { @@ -110,10 +110,10 @@ object ActorMailboxSpec { mailbox = bounded-mailbox-with-zero-pushtimeout } /default-unbounded-deque { - mailbox = akka.actor.mailbox.unbounded-deque-based + mailbox = pekko.actor.mailbox.unbounded-deque-based } /default-unbounded-deque-override-trait { - mailbox = akka.actor.mailbox.unbounded-deque-based + mailbox = pekko.actor.mailbox.unbounded-deque-based } /unbounded-default { dispatcher = unbounded-dispatcher @@ -138,20 +138,20 @@ object ActorMailboxSpec { } /bounded-deque-requirements-configured { dispatcher = requiring-bounded-dispatcher - mailbox = akka.actor.mailbox.bounded-deque-based + mailbox = pekko.actor.mailbox.bounded-deque-based } /bounded-deque-require-unbounded-configured { dispatcher = requiring-bounded-dispatcher - mailbox = akka.actor.mailbox.unbounded-deque-based + mailbox = pekko.actor.mailbox.unbounded-deque-based } /bounded-deque-require-unbounded-unconfigured { dispatcher = requiring-bounded-dispatcher } /bounded-deque-requirements-configured-props-disp { - mailbox = akka.actor.mailbox.bounded-deque-based + mailbox = pekko.actor.mailbox.bounded-deque-based } /bounded-deque-require-unbounded-configured-props-disp { - mailbox = akka.actor.mailbox.unbounded-deque-based + mailbox = 
pekko.actor.mailbox.unbounded-deque-based } /bounded-deque-requirements-configured-props-mail { dispatcher = requiring-bounded-dispatcher @@ -164,7 +164,7 @@ object ActorMailboxSpec { } } - akka.actor.mailbox.requirements { + pekko.actor.mailbox.requirements { "org.apache.pekko.actor.ActorMailboxSpec$$MCBoundedMessageQueueSemantics" = mc-bounded-mailbox } @@ -249,7 +249,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout "get an unbounded deque message queue when it is only configured on the props" in { checkMailboxQueue( - Props[QueueReportingActor]().withMailbox("akka.actor.mailbox.unbounded-deque-based"), + Props[QueueReportingActor]().withMailbox("pekko.actor.mailbox.unbounded-deque-based"), "default-override-from-props", UnboundedDeqMailboxTypes) } @@ -340,7 +340,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout "get an unbounded message queue overriding configuration on the props" in { checkMailboxQueue( - Props[QueueReportingActor]().withMailbox("akka.actor.mailbox.unbounded-deque-based"), + Props[QueueReportingActor]().withMailbox("pekko.actor.mailbox.unbounded-deque-based"), "bounded-unbounded-override-props", UnboundedMailboxTypes) } @@ -366,7 +366,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout checkMailboxQueue( Props[StashQueueReportingActor]() .withDispatcher("requiring-bounded-dispatcher") - .withMailbox("akka.actor.mailbox.bounded-deque-based"), + .withMailbox("pekko.actor.mailbox.bounded-deque-based"), "bounded-deque-requirements-configured-props", BoundedDeqMailboxTypes) } @@ -376,7 +376,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout system.actorOf( Props[StashQueueReportingActor]() .withDispatcher("requiring-bounded-dispatcher") - .withMailbox("akka.actor.mailbox.unbounded-deque-based"), + .withMailbox("pekko.actor.mailbox.unbounded-deque-based"), "bounded-deque-require-unbounded-configured-props")) } @@ 
-410,7 +410,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout "get a bounded deque-based message queue if configured and required with Props (mailbox)" in { checkMailboxQueue( - Props[StashQueueReportingActor]().withMailbox("akka.actor.mailbox.bounded-deque-based"), + Props[StashQueueReportingActor]().withMailbox("pekko.actor.mailbox.bounded-deque-based"), "bounded-deque-requirements-configured-props-mail", BoundedDeqMailboxTypes) } @@ -418,7 +418,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout "fail with a unbounded deque-based message queue if configured and required with Props (mailbox)" in { intercept[ConfigurationException]( system.actorOf( - Props[StashQueueReportingActor]().withMailbox("akka.actor.mailbox.unbounded-deque-based"), + Props[StashQueueReportingActor]().withMailbox("pekko.actor.mailbox.unbounded-deque-based"), "bounded-deque-require-unbounded-configured-props-mail")) } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorRefSpec.scala index 1db23837cf..5bdd982ef9 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorRefSpec.scala @@ -114,7 +114,7 @@ object ActorRefSpec { class ActorRefSpec extends AkkaSpec(""" # testing Java serialization of ActorRef - akka.actor.allow-java-serialization = on + pekko.actor.allow-java-serialization = on """) with DefaultTimeout { import org.apache.pekko.actor.ActorRefSpec._ diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemDispatcherSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemDispatcherSpec.scala index 4158ecb767..0c6be11882 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemDispatcherSpec.scala +++ 
b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemDispatcherSpec.scala @@ -69,7 +69,7 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" val ecProbe = TestProbe() val ec = new SnitchingExecutionContext(ecProbe.ref, ExecutionContexts.global()) - val config = ConfigFactory.parseString("akka.actor.default-dispatcher.executor = \"fork-join-executor\"") + val config = ConfigFactory.parseString("pekko.actor.default-dispatcher.executor = \"fork-join-executor\"") val system2 = ActorSystem( name = "ActorSystemDispatchersSpec-ec-configured", config = Some(config), @@ -97,13 +97,13 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" val sys = ActorSystem( "ActorSystemDispatchersSpec-override-internal-disp", ConfigFactory.parseString(""" - akka.actor.internal-dispatcher = akka.actor.default-dispatcher + pekko.actor.internal-dispatcher = pekko.actor.default-dispatcher """)) try { // that the user guardian runs on the overridden dispatcher instead of internal // isn't really a guarantee any internal actor has been made running on the right one // but it's better than no test coverage at all - userGuardianDispatcher(sys) should ===("akka.actor.default-dispatcher") + userGuardianDispatcher(sys) should ===("pekko.actor.default-dispatcher") } finally { shutdown(sys) } @@ -118,7 +118,7 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" ActorSystem( name = "ActorSystemDispatchersSpec-passed-in-ec-for-internal", config = Some(ConfigFactory.parseString(""" - akka.actor.internal-dispatcher = akka.actor.default-dispatcher + pekko.actor.internal-dispatcher = pekko.actor.default-dispatcher """)), defaultExecutionContext = Some(ec)) @@ -141,7 +141,7 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" } "use an internal dispatcher for the guardian by default" in { - userGuardianDispatcher(system) should ===("akka.actor.internal-dispatcher") + 
userGuardianDispatcher(system) should ===("pekko.actor.internal-dispatcher") } "use the default dispatcher by a user provided user guardian" in { @@ -154,7 +154,7 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" ActorSystemSetup.empty) sys.start() try { - userGuardianDispatcher(sys) should ===("akka.actor.default-dispatcher") + userGuardianDispatcher(sys) should ===("pekko.actor.default-dispatcher") } finally shutdown(sys) } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemSpec.scala index 0ec95b43b6..298a6fe3a2 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ActorSystemSpec.scala @@ -142,7 +142,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend "log dead letters" in { val sys = - ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf)) + ActorSystem("LogDeadLetters", ConfigFactory.parseString("pekko.loglevel=INFO").withFallback(AkkaSpec.testConf)) try { val probe = TestProbe()(sys) val a = sys.actorOf(Props[ActorSystemSpec.Terminater]()) @@ -166,7 +166,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend "log dead letters sent without sender reference" in { val sys = - ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf)) + ActorSystem("LogDeadLetters", ConfigFactory.parseString("pekko.loglevel=INFO").withFallback(AkkaSpec.testConf)) try { val probe = TestProbe()(sys) val a = sys.actorOf(Props[ActorSystemSpec.Terminater]()) @@ -317,7 +317,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend ActorSystem( "Stop", ConfigFactory - 
.parseString("akka.actor.guardian-supervisor-strategy=org.apache.pekko.actor.StoppingSupervisorStrategy") + .parseString("pekko.actor.guardian-supervisor-strategy=org.apache.pekko.actor.StoppingSupervisorStrategy") .withFallback(AkkaSpec.testConf)) val a = system.actorOf(Props(new Actor { def receive = { @@ -340,7 +340,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend ActorSystem( "Stop", ConfigFactory - .parseString("akka.actor.guardian-supervisor-strategy=\"org.apache.pekko.actor.ActorSystemSpec$Strategy\"") + .parseString("pekko.actor.guardian-supervisor-strategy=\"org.apache.pekko.actor.ActorSystemSpec$Strategy\"") .withFallback(AkkaSpec.testConf)) val a = system.actorOf(Props(new Actor { def receive = { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/Bench.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/Bench.scala index bcc9c242b6..0ed4ac9278 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/Bench.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/Bench.scala @@ -109,7 +109,7 @@ object Chameneos { } def run(): Unit = { - // System.setProperty("akka.config", "akka.conf") + // System.setProperty("pekko.config", "pekko.conf") Chameneos.start = System.currentTimeMillis val system = ActorSystem() system.actorOf(Props(new Mall(1000000, 4))) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/CoordinatedShutdownSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/CoordinatedShutdownSpec.scala index f2bc587a1a..4d82d0cee0 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/CoordinatedShutdownSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/CoordinatedShutdownSpec.scala @@ -27,8 +27,8 @@ import scala.concurrent.Promise class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString(""" - akka.loglevel=INFO - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + 
pekko.loglevel=INFO + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """)) { def extSys = system.asInstanceOf[ExtendedActorSystem] @@ -511,13 +511,13 @@ class CoordinatedShutdownSpec } "default exit code to 0" in { - lazy val conf = ConfigFactory.load().getConfig("akka.coordinated-shutdown") + lazy val conf = ConfigFactory.load().getConfig("pekko.coordinated-shutdown") val confWithOverrides = CoordinatedShutdown.confWithOverrides(conf, None) confWithOverrides.getInt("exit-code") should ===(0) } "default exit code to -1 when the Reason is ClusterDowning" in { - lazy val conf = ConfigFactory.load().getConfig("akka.coordinated-shutdown") + lazy val conf = ConfigFactory.load().getConfig("pekko.coordinated-shutdown") val confWithOverrides = CoordinatedShutdown.confWithOverrides(conf, Some(CoordinatedShutdown.ClusterDowningReason)) confWithOverrides.getInt("exit-code") should ===(-1) @@ -549,7 +549,7 @@ class CoordinatedShutdownSpec val sys = ActorSystem( system.name, ConfigFactory - .parseString("akka.coordinated-shutdown.run-by-actor-system-terminate = off") + .parseString("pekko.coordinated-shutdown.run-by-actor-system-terminate = off") .withFallback(system.settings.config)) try { Await.result(sys.terminate(), 10.seconds) @@ -565,7 +565,7 @@ class CoordinatedShutdownSpec val sys = ActorSystem( system.name, ConfigFactory - .parseString("akka.coordinated-shutdown.terminate-actor-system = off") + .parseString("pekko.coordinated-shutdown.terminate-actor-system = off") .withFallback(system.settings.config)) // will only get here if test is failing shutdown(sys) @@ -575,9 +575,9 @@ class CoordinatedShutdownSpec "add and remove user JVM hooks with run-by-jvm-shutdown-hook = off, terminate-actor-system = off" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-1-${System.currentTimeMillis()}" lazy val systemConfig = ConfigFactory.parseString(""" - akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off - 
akka.coordinated-shutdown.terminate-actor-system = off - akka.coordinated-shutdown.run-by-actor-system-terminate = off + pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = off + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.run-by-actor-system-terminate = off """) override def withSystemRunning(newSystem: ActorSystem, coordinatedShutdown: CoordinatedShutdown): Unit = { @@ -591,9 +591,9 @@ class CoordinatedShutdownSpec "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, terminate-actor-system = off" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-2-${System.currentTimeMillis()}" lazy val systemConfig = ConfigFactory.parseString(""" - akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on - akka.coordinated-shutdown.terminate-actor-system = off - akka.coordinated-shutdown.run-by-actor-system-terminate = off + pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = on + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.run-by-actor-system-terminate = off """) override def withSystemRunning(newSystem: ActorSystem, coordinatedShutdown: CoordinatedShutdown): Unit = { @@ -608,8 +608,8 @@ class CoordinatedShutdownSpec "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, terminate-actor-system = on" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-3-${System.currentTimeMillis()}" lazy val systemConfig = ConfigFactory.parseString(""" - akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on - akka.coordinated-shutdown.terminate-actor-system = on + pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = on + pekko.coordinated-shutdown.terminate-actor-system = on """) def withSystemRunning(newSystem: ActorSystem, coordinatedShutdown: CoordinatedShutdown): Unit = { @@ -620,11 +620,11 @@ class CoordinatedShutdownSpec } } - "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, akka.jvm-shutdown-hooks = off" 
in new JvmHookTest { + "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, pekko.jvm-shutdown-hooks = off" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-4-${System.currentTimeMillis()}" lazy val systemConfig = ConfigFactory.parseString(""" - akka.jvm-shutdown-hooks = off - akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on + pekko.jvm-shutdown-hooks = off + pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = on """) def withSystemRunning(newSystem: ActorSystem, coordinatedShutdown: CoordinatedShutdown): Unit = { @@ -638,8 +638,8 @@ class CoordinatedShutdownSpec "access extension after system termination" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-terminated-${System.currentTimeMillis()}" lazy val systemConfig = ConfigFactory.parseString(""" - akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on - akka.coordinated-shutdown.terminate-actor-system = on + pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = on + pekko.coordinated-shutdown.terminate-actor-system = on """) def withSystemRunning(newSystem: ActorSystem, coordinatedShutdown: CoordinatedShutdown): Unit = { @@ -653,7 +653,7 @@ class CoordinatedShutdownSpec val system = ActorSystem( s"CoordinatedShutdownSpec-terminated-${System.currentTimeMillis()}", ConfigFactory.parseString(""" - akka.coordinated-shutdown.phases { + pekko.coordinated-shutdown.phases { before-actor-system-terminate { } @@ -780,7 +780,7 @@ class CoordinatedShutdownSpec } else false } } - private val csConfig = newSystem.settings.config.getConfig("akka.coordinated-shutdown") + private val csConfig = newSystem.settings.config.getConfig("pekko.coordinated-shutdown") // pretend extension creation and start private val cs = new CoordinatedShutdown(newSystem, CoordinatedShutdown.phasesFromConfig(csConfig), mockRuntime) CoordinatedShutdown.init(newSystem, csConfig, cs) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeadLetterSuspensionSpec.scala 
b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeadLetterSuspensionSpec.scala index 152137b990..118f5cdae7 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeadLetterSuspensionSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeadLetterSuspensionSpec.scala @@ -35,9 +35,9 @@ object DeadLetterSuspensionSpec { } class DeadLetterSuspensionSpec extends AkkaSpec(""" - akka.loglevel = INFO - akka.log-dead-letters = 4 - akka.log-dead-letters-suspend-duration = 2s + pekko.loglevel = INFO + pekko.log-dead-letters = 4 + pekko.log-dead-letters-suspend-duration = 2s """) with ImplicitSender { import DeadLetterSuspensionSpec._ @@ -58,7 +58,7 @@ class DeadLetterSuspensionSpec extends AkkaSpec(""" private def expectedUnhandledLogMessage(count: Int): String = s"Message [java.lang.Integer] from $testActor to $unhandledActor was unhandled. [$count] dead letters encountered" - "must suspend dead-letters logging when reaching 'akka.log-dead-letters', and then re-enable" in { + "must suspend dead-letters logging when reaching 'pekko.log-dead-letters', and then re-enable" in { EventFilter.info(start = expectedDeadLettersLogMessage(1), occurrences = 1).intercept { deadActor ! 
1 } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeployerSpec.scala index 065c2b36b6..bb211d13be 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/DeployerSpec.scala @@ -17,7 +17,7 @@ import pekko.testkit.AkkaSpec object DeployerSpec { val deployerConf = ConfigFactory.parseString( """ - akka.actor.deployment { + pekko.actor.deployment { /service1 { } /service-direct { @@ -82,7 +82,7 @@ object DeployerSpec { class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { "A Deployer" must { - "be able to parse 'akka.actor.deployment._' with all default values" in { + "be able to parse 'pekko.actor.deployment._' with all default values" in { val service = "/service1" val deployment = system.asInstanceOf[ExtendedActorSystem].provider.deployer.lookup(service.split("/").drop(1)) @@ -103,7 +103,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { deployment should ===(None) } - "be able to parse 'akka.actor.deployment._' with dispatcher config" in { + "be able to parse 'pekko.actor.deployment._' with dispatcher config" in { val service = "/service3" val deployment = system.asInstanceOf[ExtendedActorSystem].provider.deployer.lookup(service.split("/").drop(1)) @@ -118,7 +118,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { Deploy.NoMailboxGiven))) } - "be able to parse 'akka.actor.deployment._' with mailbox config" in { + "be able to parse 'pekko.actor.deployment._' with mailbox config" in { val service = "/service4" val deployment = system.asInstanceOf[ExtendedActorSystem].provider.deployer.lookup(service.split("/").drop(1)) @@ -138,7 +138,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { val invalidDeployerConf = ConfigFactory .parseString( """ - akka.actor.deployment { + pekko.actor.deployment { 
/service-invalid-number-of-instances { router = round-robin-pool nr-of-instances = boom @@ -157,7 +157,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { val invalidDeployerConf = ConfigFactory .parseString( """ - akka.actor.deployment { + pekko.actor.deployment { /gul/ubåt { router = round-robin-pool nr-of-instances = 2 @@ -173,7 +173,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { e.getMessage should include("[/gul/ubåt]") } - "be able to parse 'akka.actor.deployment._' with from-code router" in { + "be able to parse 'pekko.actor.deployment._' with from-code router" in { assertRouting("/service-direct", NoRouter, "/service-direct") } @@ -181,26 +181,26 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { assertRouting("/service-direct2", NoRouter, "/service-direct2") } - "be able to parse 'akka.actor.deployment._' with round-robin router" in { + "be able to parse 'pekko.actor.deployment._' with round-robin router" in { assertRouting("/service-round-robin", RoundRobinPool(1), "/service-round-robin") } - "be able to parse 'akka.actor.deployment._' with random router" in { + "be able to parse 'pekko.actor.deployment._' with random router" in { assertRouting("/service-random", RandomPool(1), "/service-random") } - "be able to parse 'akka.actor.deployment._' with scatter-gather router" in { + "be able to parse 'pekko.actor.deployment._' with scatter-gather router" in { assertRouting( "/service-scatter-gather", ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds), "/service-scatter-gather") } - "be able to parse 'akka.actor.deployment._' with consistent-hashing router" in { + "be able to parse 'pekko.actor.deployment._' with consistent-hashing router" in { assertRouting("/service-consistent-hashing", ConsistentHashingPool(1), "/service-consistent-hashing") } - "be able to parse 'akka.actor.deployment._' with router resizer" in { + "be able to parse 'pekko.actor.deployment._' with router resizer" 
in { val resizer = DefaultResizer() assertRouting("/service-resizer", RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)), "/service-resizer") } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ExtensionSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ExtensionSpec.scala index ea64ba6c8f..678e5b2714 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ExtensionSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ExtensionSpec.scala @@ -59,7 +59,7 @@ class ExtensionSpec extends AnyWordSpec with Matchers { "The ActorSystem extensions support" should { "support extensions" in { - val config = ConfigFactory.parseString("""akka.extensions = ["org.apache.pekko.actor.TestExtension"]""") + val config = ConfigFactory.parseString("""pekko.extensions = ["org.apache.pekko.actor.TestExtension"]""") val system = ActorSystem("extensions", config) // TestExtension is configured and should be loaded at startup @@ -87,23 +87,23 @@ class ExtensionSpec extends AnyWordSpec with Matchers { shutdownActorSystem(system) } - "fail the actor system if an extension listed in akka.extensions fails to start" in { + "fail the actor system if an extension listed in pekko.extensions fails to start" in { intercept[RuntimeException] { val system = ActorSystem( "failing", ConfigFactory.parseString(""" - akka.extensions = ["org.apache.pekko.actor.FailingTestExtension"] + pekko.extensions = ["org.apache.pekko.actor.FailingTestExtension"] """)) shutdownActorSystem(system) } } - "log an error if an extension listed in akka.extensions cannot be loaded" in { + "log an error if an extension listed in pekko.extensions cannot be loaded" in { val system = ActorSystem( "failing", ConfigFactory.parseString(""" - akka.extensions = ["org.apache.pekko.actor.MissingExtension"] + pekko.extensions = ["org.apache.pekko.actor.MissingExtension"] """)) EventFilter.error( "While trying to load extension [org.apache.pekko.actor.MissingExtension], 
skipping.").intercept(())(system) @@ -115,7 +115,7 @@ class ExtensionSpec extends AnyWordSpec with Matchers { // could be initialized by other tests, but assuming tests are not running in parallel val countBefore = InstanceCountingExtension.createCount.get() val system = ActorSystem("extensions") - val listedExtensions = system.settings.config.getStringList("akka.library-extensions").asScala + val listedExtensions = system.settings.config.getStringList("pekko.library-extensions").asScala listedExtensions.count(_.contains("InstanceCountingExtension")) should ===(1) InstanceCountingExtension.createCount.get() - countBefore should ===(1) @@ -131,9 +131,9 @@ class ExtensionSpec extends AnyWordSpec with Matchers { "extensions", ConfigFactory.parseString( """ - akka.library-extensions = ["org.apache.pekko.actor.InstanceCountingExtension", "org.apache.pekko.actor.InstanceCountingExtension", "org.apache.pekko.actor.InstanceCountingExtension$"] + pekko.library-extensions = ["org.apache.pekko.actor.InstanceCountingExtension", "org.apache.pekko.actor.InstanceCountingExtension", "org.apache.pekko.actor.InstanceCountingExtension$"] """)) - val listedExtensions = system.settings.config.getStringList("akka.library-extensions").asScala + val listedExtensions = system.settings.config.getStringList("pekko.library-extensions").asScala listedExtensions.count(_.contains("InstanceCountingExtension")) should ===(3) // testing duplicate names InstanceCountingExtension.createCount.get() - countBefore should ===(1) @@ -146,7 +146,7 @@ class ExtensionSpec extends AnyWordSpec with Matchers { ActorSystem( "failing", ConfigFactory.parseString(""" - akka.library-extensions += "org.apache.pekko.actor.FailingTestExtension" + pekko.library-extensions += "org.apache.pekko.actor.FailingTestExtension" """).withFallback(ConfigFactory.load()).resolve()) } @@ -157,7 +157,7 @@ class ExtensionSpec extends AnyWordSpec with Matchers { ActorSystem( "failing", ConfigFactory.parseString(""" - 
akka.library-extensions += "org.apache.pekko.actor.MissingExtension" + pekko.library-extensions += "org.apache.pekko.actor.MissingExtension" """).withFallback(ConfigFactory.load())) } } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FSMActorSpec.scala index 997a3da770..2427df4c87 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FSMActorSpec.scala @@ -101,7 +101,7 @@ object FSMActorSpec { final case class CodeState(soFar: String, code: String) } -class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with ImplicitSender { +class FSMActorSpec extends AkkaSpec(Map("pekko.actor.debug.fsm" -> true)) with ImplicitSender { import FSMActorSpec._ val timeout = Timeout(2 seconds) @@ -254,7 +254,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im "log events and transitions if asked to do so" in { import pekko.util.ccompat.JavaConverters._ val config = ConfigFactory - .parseMap(Map("akka.loglevel" -> "DEBUG", "akka.actor.debug.fsm" -> true).asJava) + .parseMap(Map("pekko.loglevel" -> "DEBUG", "pekko.actor.debug.fsm" -> true).asJava) .withFallback(system.settings.config) val fsmEventSystem = ActorSystem("fsmEvent", config) try { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FunctionRefSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FunctionRefSpec.scala index 5c16cda488..e1233dd893 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FunctionRefSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/FunctionRefSpec.scala @@ -39,10 +39,10 @@ object FunctionRefSpec { class FunctionRefSpec extends AkkaSpec(""" # test is using Java serialization and relies on serialize-messages=on - akka.actor.allow-java-serialization = on - 
akka.actor.warn-about-java-serializer-usage = off - akka.actor.serialize-messages = on - akka.actor.no-serialization-verification-needed-class-prefix = [] + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off + pekko.actor.serialize-messages = on + pekko.actor.no-serialization-verification-needed-class-prefix = [] """) with ImplicitSender { import FunctionRefSpec._ diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/LocalActorRefProviderSpec.scala index 7378192fbc..f6d54d9b7f 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/LocalActorRefProviderSpec.scala @@ -19,7 +19,7 @@ import pekko.util.Timeout object LocalActorRefProviderSpec { val config = """ - akka { + pekko { log-dead-letters = on actor { debug.unhandled = on diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/PropsCreationSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/PropsCreationSpec.scala index dcf60fc0f6..c1aa8979a0 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/PropsCreationSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/PropsCreationSpec.scala @@ -32,10 +32,10 @@ object PropsCreationSpec { class PropsCreationSpec extends AkkaSpec(""" # test is using Java serialization and relies on serialize-creators=on - akka.actor.serialize-creators = on - akka.actor.no-serialization-verification-needed-class-prefix = [] - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.serialize-creators = on + pekko.actor.no-serialization-verification-needed-class-prefix = [] + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) { import org.apache.pekko.actor.PropsCreationSpec._ diff 
--git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ProviderSelectionSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ProviderSelectionSpec.scala index ba10b8ac5a..1518260b1d 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ProviderSelectionSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/ProviderSelectionSpec.scala @@ -22,7 +22,7 @@ class ProviderSelectionSpec extends AbstractSpec { val classLoader = findClassLoader() def settingsWith(key: String): Settings = { - val c = ConfigFactory.parseString(s"""akka.actor.provider = "$key"""").withFallback(localConfig) + val c = ConfigFactory.parseString(s"""pekko.actor.provider = "$key"""").withFallback(localConfig) new Settings(classLoader, c, "test", setup) } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SchedulerSpec.scala index 9219cde749..03c37d2efe 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SchedulerSpec.scala @@ -27,8 +27,8 @@ import pekko.testkit._ object SchedulerSpec { val testConfRevolver = ConfigFactory.parseString(""" - akka.scheduler.implementation = org.apache.pekko.actor.LightArrayRevolverScheduler - akka.scheduler.ticks-per-wheel = 32 + pekko.scheduler.implementation = org.apache.pekko.actor.LightArrayRevolverScheduler + pekko.scheduler.ticks-per-wheel = 32 """).withFallback(AkkaSpec.testConf) } @@ -473,7 +473,7 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev } "survive vicious enqueueing" taggedAs TimingTest in { - withScheduler(config = ConfigFactory.parseString("akka.scheduler.ticks-per-wheel=2")) { (sched, driver) => + withScheduler(config = ConfigFactory.parseString("pekko.scheduler.ticks-per-wheel=2")) { (sched, driver) => import driver._ import system.dispatcher val counter = new AtomicInteger 
@@ -533,7 +533,7 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev } "correctly wrap around wheel rounds" taggedAs TimingTest in { - withScheduler(config = ConfigFactory.parseString("akka.scheduler.ticks-per-wheel=2")) { (sched, driver) => + withScheduler(config = ConfigFactory.parseString("pekko.scheduler.ticks-per-wheel=2")) { (sched, driver) => implicit def ec: ExecutionContext = localEC import driver._ val start = step / 2 diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SupervisorHierarchySpec.scala index be555deced..74ed14a14a 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/SupervisorHierarchySpec.scala @@ -85,8 +85,8 @@ object SupervisorHierarchySpec { hierarchy { type = "org.apache.pekko.actor.SupervisorHierarchySpec$MyDispatcherConfigurator" } - akka.loglevel = INFO - akka.actor.debug.fsm = on + pekko.loglevel = INFO + pekko.actor.debug.fsm = on """) class MyDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/TypedActorSpec.scala index a5ca46edda..3c1b2a0500 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/TypedActorSpec.scala @@ -36,11 +36,11 @@ object TypedActorSpec { fixed-pool-size = 60 } } - akka.actor.serializers.sample = "org.apache.pekko.actor.TypedActorSpec$SampleSerializerWithStringManifest$" - akka.actor.serialization-bindings."org.apache.pekko.actor.TypedActorSpec$WithStringSerializedClass" = sample + pekko.actor.serializers.sample = "org.apache.pekko.actor.TypedActorSpec$SampleSerializerWithStringManifest$" + 
pekko.actor.serialization-bindings."org.apache.pekko.actor.TypedActorSpec$WithStringSerializedClass" = sample # test is using Java serialization and not priority to convert - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """ class CyclicIterator[T](val items: immutable.Seq[T]) extends Iterator[T] { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dispatch/DispatchersSpec.scala index e0f817f08d..eeaf85713f 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dispatch/DispatchersSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dispatch/DispatchersSpec.scala @@ -42,7 +42,7 @@ object DispatchersSpec { my-aliased-dispatcher = myapp.mydispatcher missing-aliased-dispatcher = myapp.missing-dispatcher } - akka.actor.deployment { + pekko.actor.deployment { /echo1 { dispatcher = myapp.mydispatcher } @@ -123,7 +123,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend def validTypes = typesAndValidators.keys.toList - val defaultDispatcherConfig = settings.config.getConfig("akka.actor.default-dispatcher") + val defaultDispatcherConfig = settings.config.getConfig("pekko.actor.default-dispatcher") lazy val allDispatchers: Map[String, MessageDispatcher] = { import pekko.util.ccompat.JavaConverters._ @@ -232,7 +232,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend "include system name and dispatcher id in thread names for default-dispatcher" in { system.actorOf(Props[ThreadNameEcho]()) ! "what's the name?" 
- val Expected = R("(DispatchersSpec-akka.actor.default-dispatcher-[1-9][0-9]*)") + val Expected = R("(DispatchersSpec-pekko.actor.default-dispatcher-[1-9][0-9]*)") expectMsgPF() { case Expected(_) => } @@ -268,7 +268,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend pool ! Identify(None) val routee = expectMsgType[ActorIdentity].ref.get routee ! "what's the name?" - val Expected = R("""(DispatchersSpec-akka\.actor\.deployment\./pool1\.pool-dispatcher-[1-9][0-9]*)""") + val Expected = R("""(DispatchersSpec-pekko\.actor\.deployment\./pool1\.pool-dispatcher-[1-9][0-9]*)""") expectMsgPF() { case Expected(_) => } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dungeon/DispatchSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dungeon/DispatchSpec.scala index a8d576e5d1..0eb7aba247 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dungeon/DispatchSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/actor/dungeon/DispatchSpec.scala @@ -18,13 +18,13 @@ object DispatchSpec { } } class DispatchSpec extends AkkaSpec(""" - akka.actor.serialize-messages = on - akka.actor.no-serialization-verification-needed-class-prefix = [] + pekko.actor.serialize-messages = on + pekko.actor.no-serialization-verification-needed-class-prefix = [] """) with DefaultTimeout { import DispatchSpec._ "The dispatcher" should { - "log an appropriate message when akka.actor.serialize-messages triggers a serialization error" in { + "log an appropriate message when pekko.actor.serialize-messages triggers a serialization error" in { val actor = system.actorOf(Props[EmptyActor]()) EventFilter[Exception](pattern = ".*NoSerializationVerificationNeeded.*", occurrences = 1).intercept { actor ! 
new UnserializableMessageClass diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/config/ConfigSpec.scala index 86bf0c17d2..6a710a6ea0 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/config/ConfigSpec.scala @@ -29,66 +29,66 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin { import config._ - getString("akka.version") should ===(ActorSystem.Version) + getString("pekko.version") should ===(ActorSystem.Version) settings.ConfigVersion should ===(ActorSystem.Version) - getBoolean("akka.daemonic") should ===(false) + getBoolean("pekko.daemonic") should ===(false) - getBoolean("akka.actor.serialize-messages") should ===(false) + getBoolean("pekko.actor.serialize-messages") should ===(false) settings.SerializeAllMessages should ===(false) settings.NoSerializationVerificationNeededClassPrefix should ===(Set("org.apache.pekko.")) - getInt("akka.scheduler.ticks-per-wheel") should ===(512) - getDuration("akka.scheduler.tick-duration", TimeUnit.MILLISECONDS) should ===(10L) - getString("akka.scheduler.implementation") should ===("org.apache.pekko.actor.LightArrayRevolverScheduler") + getInt("pekko.scheduler.ticks-per-wheel") should ===(512) + getDuration("pekko.scheduler.tick-duration", TimeUnit.MILLISECONDS) should ===(10L) + getString("pekko.scheduler.implementation") should ===("org.apache.pekko.actor.LightArrayRevolverScheduler") - getBoolean("akka.daemonic") should ===(false) + getBoolean("pekko.daemonic") should ===(false) settings.Daemonicity should ===(false) - getBoolean("akka.jvm-exit-on-fatal-error") should ===(true) + getBoolean("pekko.jvm-exit-on-fatal-error") should ===(true) settings.JvmExitOnFatalError should ===(true) settings.JvmShutdownHooks should ===(true) - getBoolean("akka.fail-mixed-versions") should ===(true) + 
getBoolean("pekko.fail-mixed-versions") should ===(true) settings.FailMixedVersions should ===(true) - getInt("akka.actor.deployment.default.virtual-nodes-factor") should ===(10) + getInt("pekko.actor.deployment.default.virtual-nodes-factor") should ===(10) settings.DefaultVirtualNodesFactor should ===(10) - getDuration("akka.actor.unstarted-push-timeout", TimeUnit.MILLISECONDS) should ===(10.seconds.toMillis) + getDuration("pekko.actor.unstarted-push-timeout", TimeUnit.MILLISECONDS) should ===(10.seconds.toMillis) settings.UnstartedPushTimeout.duration should ===(10.seconds) settings.Loggers.size should ===(1) settings.Loggers.head should ===(classOf[DefaultLogger].getName) - getStringList("akka.loggers").get(0) should ===(classOf[DefaultLogger].getName) + getStringList("pekko.loggers").get(0) should ===(classOf[DefaultLogger].getName) - getDuration("akka.logger-startup-timeout", TimeUnit.MILLISECONDS) should ===(5.seconds.toMillis) + getDuration("pekko.logger-startup-timeout", TimeUnit.MILLISECONDS) should ===(5.seconds.toMillis) settings.LoggerStartTimeout.duration should ===(5.seconds) - getString("akka.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) + getString("pekko.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) - getInt("akka.log-dead-letters") should ===(10) + getInt("pekko.log-dead-letters") should ===(10) settings.LogDeadLetters should ===(10) - getBoolean("akka.log-dead-letters-during-shutdown") should ===(false) + getBoolean("pekko.log-dead-letters-during-shutdown") should ===(false) settings.LogDeadLettersDuringShutdown should ===(false) - getDuration("akka.log-dead-letters-suspend-duration", TimeUnit.MILLISECONDS) should ===(5 * 60 * 1000L) + getDuration("pekko.log-dead-letters-suspend-duration", TimeUnit.MILLISECONDS) should ===(5 * 60 * 1000L) settings.LogDeadLettersSuspendDuration should ===(5.minutes) - getBoolean("akka.coordinated-shutdown.terminate-actor-system") should ===(true) + 
getBoolean("pekko.coordinated-shutdown.terminate-actor-system") should ===(true) settings.CoordinatedShutdownTerminateActorSystem should ===(true) - getBoolean("akka.coordinated-shutdown.run-by-actor-system-terminate") should ===(true) + getBoolean("pekko.coordinated-shutdown.run-by-actor-system-terminate") should ===(true) settings.CoordinatedShutdownRunByActorSystemTerminate should ===(true) - getBoolean("akka.actor.allow-java-serialization") should ===(false) + getBoolean("pekko.actor.allow-java-serialization") should ===(false) settings.AllowJavaSerialization should ===(false) } { - val c = config.getConfig("akka.actor.default-dispatcher") + val c = config.getConfig("pekko.actor.default-dispatcher") // General dispatcher config @@ -133,7 +133,7 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin // Debug config { - val debug = config.getConfig("akka.actor.debug") + val debug = config.getConfig("pekko.actor.debug") import debug._ getBoolean("receive") should ===(false) settings.AddLoggingReceive should ===(false) @@ -160,7 +160,7 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin } { - val c = config.getConfig("akka.actor.default-mailbox") + val c = config.getConfig("pekko.actor.default-mailbox") // general mailbox config @@ -177,19 +177,19 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin "not be amended for default reference in akka-actor" in { val dynamicAccess = system.asInstanceOf[ExtendedActorSystem].dynamicAccess val config = ActorSystem.Settings.amendSlf4jConfig(ConfigFactory.defaultReference(), dynamicAccess) - config.getStringList("akka.loggers").size() should ===(1) - config.getStringList("akka.loggers").get(0) should ===(classOf[DefaultLogger].getName) - config.getString("akka.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) + config.getStringList("pekko.loggers").size() should ===(1) + config.getStringList("pekko.loggers").get(0) 
should ===(classOf[DefaultLogger].getName) + config.getString("pekko.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) } "not be amended when akka-slf4j is not in classpath" in { val dynamicAccess = system.asInstanceOf[ExtendedActorSystem].dynamicAccess val config = ActorSystem.Settings.amendSlf4jConfig( - ConfigFactory.parseString("akka.use-slf4j = on").withFallback(ConfigFactory.defaultReference()), + ConfigFactory.parseString("pekko.use-slf4j = on").withFallback(ConfigFactory.defaultReference()), dynamicAccess) - config.getStringList("akka.loggers").size() should ===(1) - config.getStringList("akka.loggers").get(0) should ===(classOf[DefaultLogger].getName) - config.getString("akka.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) + config.getStringList("pekko.loggers").size() should ===(1) + config.getStringList("pekko.loggers").get(0) should ===(classOf[DefaultLogger].getName) + config.getString("pekko.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) } } } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/dispatch/DispatcherShutdownSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/dispatch/DispatcherShutdownSpec.scala index c6fbd55900..95e67c21b0 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/dispatch/DispatcherShutdownSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/dispatch/DispatcherShutdownSpec.scala @@ -29,8 +29,8 @@ class DispatcherShutdownSpec extends AnyWordSpec with Matchers { .toList .map(_.getThreadName) .filter(name => - name.startsWith("DispatcherShutdownSpec-akka.actor.default") || name.startsWith( - "DispatcherShutdownSpec-akka.actor.internal")) // nothing is run on default without any user actors started + name.startsWith("DispatcherShutdownSpec-pekko.actor.default") || name.startsWith( + "DispatcherShutdownSpec-pekko.actor.internal")) // nothing is run on default without any user actors started .size val system = 
ActorSystem("DispatcherShutdownSpec") diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/event/AddressTerminatedTopicBenchSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/event/AddressTerminatedTopicBenchSpec.scala index 7d259aa2e7..60a4dc30ab 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/event/AddressTerminatedTopicBenchSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/event/AddressTerminatedTopicBenchSpec.scala @@ -27,7 +27,7 @@ object AddressTerminatedTopicBenchSpec { } } -class AddressTerminatedTopicBenchSpec extends AkkaSpec("akka.loglevel=INFO") { +class AddressTerminatedTopicBenchSpec extends AkkaSpec("pekko.loglevel=INFO") { import AddressTerminatedTopicBenchSpec._ "Subscribe and unsubscribe of AddressTerminated" must { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventBusSpec.scala index 8cd39e47f8..00bfd73c6a 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventBusSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventBusSpec.scala @@ -172,7 +172,7 @@ class ActorEventBusSpec(conf: Config) extends EventBusSpec("ActorEventBus", conf import org.apache.pekko.event.ActorEventBusSpec._ def this() = - this(ConfigFactory.parseString("akka.actor.debug.event-stream = on").withFallback(AkkaSpec.testConf)) + this(ConfigFactory.parseString("pekko.actor.debug.event-stream = on").withFallback(AkkaSpec.testConf)) type BusType = MyActorEventBus def createNewEventBus(): BusType = new MyActorEventBus(system) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventStreamSpec.scala index ad0f48e1eb..732f5e8894 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/event/EventStreamSpec.scala @@ -15,7 
+15,7 @@ import pekko.testkit.{ AkkaSpec, TestProbe } object EventStreamSpec { val config = ConfigFactory.parseString(""" - akka { + pekko { stdout-loglevel = WARNING loglevel = INFO loggers = ["org.apache.pekko.event.EventStreamSpec$MyLog", "%s"] @@ -23,7 +23,7 @@ object EventStreamSpec { """.format(Logging.StandardOutLogger.getClass.getName)) val configUnhandled = ConfigFactory.parseString(""" - akka { + pekko { stdout-loglevel = WARNING loglevel = WARNING actor.debug.unhandled = on @@ -32,7 +32,7 @@ object EventStreamSpec { """) val configUnhandledWithDebug = - ConfigFactory.parseString("akka.actor.debug.event-stream = on").withFallback(configUnhandled) + ConfigFactory.parseString("pekko.actor.debug.event-stream = on").withFallback(configUnhandled) final case class M(i: Int) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggerSpec.scala index de751ae33f..771fe49161 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggerSpec.scala @@ -29,7 +29,7 @@ import pekko.util.Helpers object LoggerSpec { val defaultConfig = ConfigFactory.parseString(""" - akka { + pekko { stdout-loglevel = "WARNING" loglevel = "DEBUG" # test verifies debug loggers = ["org.apache.pekko.event.LoggerSpec$TestLogger1"] @@ -37,7 +37,7 @@ object LoggerSpec { """).withFallback(AkkaSpec.testConf) val slowConfig = ConfigFactory.parseString(""" - akka { + pekko { stdout-loglevel = "ERROR" loglevel = "ERROR" loggers = ["org.apache.pekko.event.LoggerSpec$SlowLogger"] @@ -45,7 +45,7 @@ object LoggerSpec { """).withFallback(AkkaSpec.testConf) val noLoggingConfig = ConfigFactory.parseString(""" - akka { + pekko { stdout-loglevel = "OFF" loglevel = "OFF" loggers = ["org.apache.pekko.event.LoggerSpec$TestLogger1"] @@ -54,7 +54,7 @@ object LoggerSpec { val multipleConfig = ConfigFactory.parseString(""" - akka { + pekko 
{ stdout-loglevel = "OFF" loglevel = "WARNING" loggers = ["org.apache.pekko.event.LoggerSpec$TestLogger1", "org.apache.pekko.event.LoggerSpec$TestLogger2"] @@ -62,7 +62,7 @@ object LoggerSpec { """).withFallback(AkkaSpec.testConf) val ticket3165Config = ConfigFactory.parseString(s""" - akka { + pekko { stdout-loglevel = "WARNING" loglevel = "DEBUG" # test verifies debug loggers = ["org.apache.pekko.event.LoggerSpec$$TestLogger1"] @@ -78,7 +78,7 @@ object LoggerSpec { """).withFallback(AkkaSpec.testConf) val ticket3671Config = ConfigFactory.parseString(""" - akka { + pekko { stdout-loglevel = "WARNING" loglevel = "WARNING" loggers = ["org.apache.pekko.event.LoggerSpec$TestLogger1"] diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggingReceiveSpec.scala index 999a3c9a06..a64a02ebbd 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/event/LoggingReceiveSpec.scala @@ -29,16 +29,16 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { import LoggingReceiveSpec._ val config = ConfigFactory.parseString(""" - akka.loglevel=DEBUG # test verifies debug + pekko.loglevel=DEBUG # test verifies debug """).withFallback(AkkaSpec.testConf) val appLogging = - ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" -> true).asJava).withFallback(config)) + ActorSystem("logging", ConfigFactory.parseMap(Map("pekko.actor.debug.receive" -> true).asJava).withFallback(config)) val appAuto = ActorSystem( "autoreceive", - ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" -> true).asJava).withFallback(config)) + ConfigFactory.parseMap(Map("pekko.actor.debug.autoreceive" -> true).asJava).withFallback(config)) val appLifecycle = ActorSystem( "lifecycle", - ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" -> true).asJava).withFallback(config)) + 
ConfigFactory.parseMap(Map("pekko.actor.debug.lifecycle" -> true).asJava).withFallback(config)) val filter = TestEvent.Mute(EventFilter.custom { case _: Logging.Debug => true diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/event/jul/JavaLoggerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/event/jul/JavaLoggerSpec.scala index 4c42a31a14..b8be932a86 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/event/jul/JavaLoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/event/jul/JavaLoggerSpec.scala @@ -18,7 +18,7 @@ import pekko.testkit.AkkaSpec object JavaLoggerSpec { val config = ConfigFactory.parseString(""" - akka { + pekko { loglevel = INFO loggers = ["org.apache.pekko.event.jul.JavaLogger"] logging-filter = "org.apache.pekko.event.jul.JavaLoggingFilter" diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/CapacityLimitSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/CapacityLimitSpec.scala index 0c29aeebe6..842288d5dc 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/CapacityLimitSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/CapacityLimitSpec.scala @@ -11,8 +11,8 @@ import pekko.testkit.{ AkkaSpec, TestProbe } import pekko.testkit.SocketUtil.temporaryServerAddresses class CapacityLimitSpec extends AkkaSpec(""" - akka.loglevel = ERROR - akka.io.tcp.max-channels = 4 + pekko.loglevel = ERROR + pekko.io.tcp.max-channels = 4 """) with TcpIntegrationSpecSupport { "The TCP transport implementation" should { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/InetAddressDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/InetAddressDnsResolverSpec.scala index cd85691835..95d556be62 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/InetAddressDnsResolverSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/InetAddressDnsResolverSpec.scala @@ -15,8 +15,8 @@ import pekko.testkit.{ 
AkkaSpec, TestActorRef } @nowarn class InetAddressDnsResolverSpec extends AkkaSpec(""" - akka.io.dns.inet-address.positive-ttl = default - akka.io.dns.inet-address.negative-ttl = default + pekko.io.dns.inet-address.positive-ttl = default + pekko.io.dns.inet-address.negative-ttl = default """) { thisSpecs => "The DNS resolver default ttl's" must { @@ -95,7 +95,7 @@ class InetAddressDnsResolverSpec extends AkkaSpec(""" Props( classOf[InetAddressDnsResolver], new SimpleDnsCache(), - system.settings.config.getConfig("akka.io.dns.inet-address"))) + system.settings.config.getConfig("pekko.io.dns.inet-address"))) actorRef.underlyingActor } @@ -123,8 +123,8 @@ class InetAddressDnsResolverSpec extends AkkaSpec(""" @nowarn class InetAddressDnsResolverConfigSpec extends AkkaSpec(""" - akka.io.dns.inet-address.positive-ttl = forever - akka.io.dns.inet-address.negative-ttl = never + pekko.io.dns.inet-address.positive-ttl = forever + pekko.io.dns.inet-address.negative-ttl = never """) { thisSpecs => @@ -144,7 +144,7 @@ class InetAddressDnsResolverConfigSpec extends AkkaSpec(""" Props( classOf[InetAddressDnsResolver], new SimpleDnsCache(), - system.settings.config.getConfig("akka.io.dns.inet-address"))) + system.settings.config.getConfig("pekko.io.dns.inet-address"))) actorRef.underlyingActor } } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpConnectionSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpConnectionSpec.scala index c9482c3732..c7a028aa10 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpConnectionSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpConnectionSpec.scala @@ -41,10 +41,10 @@ object TcpConnectionSpec { } class TcpConnectionSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.io.tcp.trace-logging = on - akka.io.tcp.register-timeout = 500ms + pekko.loglevel = DEBUG + pekko.loggers = 
["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.io.tcp.trace-logging = on + pekko.io.tcp.register-timeout = 500ms """) with WithLogCapturing { thisSpecs => import TcpConnectionSpec._ @@ -377,7 +377,7 @@ class TcpConnectionSpec extends AkkaSpec(""" "respect pull mode" in new EstablishedConnectionTest(pullMode = true) { // override config to decrease default buffer size def config = - ConfigFactory.parseString("akka.io.tcp.direct-buffer-size = 1k").withFallback(AkkaSpec.testConf) + ConfigFactory.parseString("pekko.io.tcp.direct-buffer-size = 1k").withFallback(AkkaSpec.testConf) override implicit lazy val system: ActorSystem = ActorSystem("respectPullModeTest", config) try run { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpIntegrationSpec.scala index 6b0f7dcf15..249981e1fd 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpIntegrationSpec.scala @@ -20,9 +20,9 @@ import pekko.testkit.WithLogCapturing import pekko.util.ByteString class TcpIntegrationSpec extends AkkaSpec(""" - akka.loglevel = debug - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.io.tcp.trace-logging = on + pekko.loglevel = debug + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.io.tcp.trace-logging = on """) with TcpIntegrationSpecSupport with TimeLimits with WithLogCapturing { def verifyActorTermination(actor: ActorRef): Unit = { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpListenerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpListenerSpec.scala index 03912f1307..bfcf5ffb0a 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpListenerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/TcpListenerSpec.scala @@ -21,7 +21,7 @@ import pekko.testkit.{ 
AkkaSpec, EventFilter, TestActorRef, TestProbe } import pekko.testkit.SocketUtil class TcpListenerSpec extends AkkaSpec(""" - akka.io.tcp.batch-accept-limit = 2 + pekko.io.tcp.batch-accept-limit = 2 """) { "A TcpListener" must { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpConnectedIntegrationSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpConnectedIntegrationSpec.scala index 0c432e5ad1..e4eff683ae 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpConnectedIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpConnectedIntegrationSpec.scala @@ -18,14 +18,14 @@ import pekko.testkit.WithLogCapturing import pekko.util.ByteString class UdpConnectedIntegrationSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.actor.debug.lifecycle = on - akka.actor.debug.autoreceive = on - akka.io.udp-connected.trace-logging = on + pekko.loglevel = DEBUG + pekko.actor.debug.lifecycle = on + pekko.actor.debug.autoreceive = on + pekko.io.udp-connected.trace-logging = on # issues with dns resolution of non existent host hanging with the # Java native host resolution - akka.io.dns.resolver = async-dns - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.io.dns.resolver = async-dns + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] """) with ImplicitSender with WithLogCapturing { val addresses = temporaryServerAddresses(5, udp = true) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpIntegrationSpec.scala index d97050433f..5298b3b839 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/UdpIntegrationSpec.scala @@ -15,7 +15,7 @@ import pekko.testkit.SocketUtil.temporaryServerAddresses import pekko.util.ByteString class UdpIntegrationSpec extends AkkaSpec(""" 
- akka.loglevel = INFO + pekko.loglevel = INFO # tests expect to be able to mutate messages """) with ImplicitSender { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/AsyncDnsResolverIntegrationSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/AsyncDnsResolverIntegrationSpec.scala index 95baa5f2db..edb8c4f4aa 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/AsyncDnsResolverIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/AsyncDnsResolverIntegrationSpec.scala @@ -33,13 +33,13 @@ object AsyncDnsResolverIntegrationSpec { lazy val dockerDnsServerPort: Int = SocketUtil.temporaryLocalPort(Both) implicit val defaultTimeout: Timeout = Timeout(10.seconds) def conf = ConfigFactory.parseString(s""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.io.dns.resolver = async-dns - akka.io.dns.async-dns.nameservers = ["localhost:${dockerDnsServerPort}"] - akka.io.dns.async-dns.search-domains = ["foo.test", "test"] - akka.io.dns.async-dns.ndots = 2 - akka.io.dns.async-dns.resolve-timeout = ${defaultTimeout.duration.toSeconds}s + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.io.dns.resolver = async-dns + pekko.io.dns.async-dns.nameservers = ["localhost:${dockerDnsServerPort}"] + pekko.io.dns.async-dns.search-domains = ["foo.test", "test"] + pekko.io.dns.async-dns.ndots = 2 + pekko.io.dns.async-dns.resolve-timeout = ${defaultTimeout.duration.toSeconds}s """) } diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsManagerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsManagerSpec.scala index d49c4947e5..5e91e04bb8 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsManagerSpec.scala +++ 
b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsManagerSpec.scala @@ -21,10 +21,10 @@ import pekko.testkit.WithLogCapturing // tests deprecated DNS API @nowarn("msg=deprecated") class AsyncDnsManagerSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.io.dns.resolver = async-dns - akka.io.dns.async-dns.nameservers = default + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.io.dns.resolver = async-dns + pekko.io.dns.async-dns.nameservers = default """) with ImplicitSender with WithLogCapturing { val dns = Dns(system).manager diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsResolverSpec.scala index 5d3e3f3e1c..3e3ab4659d 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsResolverSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/AsyncDnsResolverSpec.scala @@ -23,8 +23,8 @@ import pekko.io.dns.internal.DnsClient.{ Answer, Question4, Question6, SrvQuesti import pekko.testkit.{ AkkaSpec, TestProbe, WithLogCapturing } class AsyncDnsResolverSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] """) with WithLogCapturing { val defaultConfig = ConfigFactory.parseString(""" diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/DnsClientSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/DnsClientSpec.scala index cc7fe70921..09e0ea6f6d 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/DnsClientSpec.scala +++ 
b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/DnsClientSpec.scala @@ -18,9 +18,9 @@ import pekko.testkit.{ AkkaSpec, ImplicitSender, TestProbe } class DnsClientSpec extends AkkaSpec with ImplicitSender { "The async DNS client" should { - val exampleRequest = Question4(42, "akka.io") + val exampleRequest = Question4(42, "pekko.io") val exampleRequestMessage = - Message(42, MessageFlags(), questions = Seq(Question("akka.io", RecordType.A, RecordClass.IN))) + Message(42, MessageFlags(), questions = Seq(Question("pekko.io", RecordType.A, RecordClass.IN))) val exampleResponseMessage = Message(42, MessageFlags(answer = true)) val exampleResponse = Answer(42, Nil) val dnsServerAddress = InetSocketAddress.createUnresolved("foo", 53) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/TcpDnsClientSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/TcpDnsClientSpec.scala index 4694a2e37d..6562f312e2 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/TcpDnsClientSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/io/dns/internal/TcpDnsClientSpec.scala @@ -21,7 +21,7 @@ class TcpDnsClientSpec extends AkkaSpec with ImplicitSender { "The async TCP DNS client" should { val exampleRequestMessage = - Message(42, MessageFlags(), questions = Seq(Question("akka.io", RecordType.A, RecordClass.IN))) + Message(42, MessageFlags(), questions = Seq(Question("pekko.io", RecordType.A, RecordClass.IN))) val exampleResponseMessage = Message(42, MessageFlags(answer = true)) val dnsServerAddress = InetSocketAddress.createUnresolved("foo", 53) val localAddress = InetSocketAddress.createUnresolved("localhost", 13441) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/AskSpec.scala index 1f9c66549e..cff0fb8136 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/AskSpec.scala +++ 
b/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/AskSpec.scala @@ -19,8 +19,8 @@ import pekko.util.Timeout @nowarn class AskSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] """) with WithLogCapturing { "The “ask” pattern" must { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/BackoffOnRestartSupervisorSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/BackoffOnRestartSupervisorSpec.scala index b7d3aba18a..78bf2ad94c 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/BackoffOnRestartSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/BackoffOnRestartSupervisorSpec.scala @@ -55,8 +55,8 @@ class TestParentActor(probe: ActorRef, supervisorProps: Props) extends Actor { } class BackoffOnRestartSupervisorSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] """) with WithLogCapturing with ImplicitSender { def supervisorProps(probeRef: ActorRef) = { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/CircuitBreakerSpec.scala index 01207e23d1..fde8fb39da 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/pattern/CircuitBreakerSpec.scala @@ -84,7 +84,7 @@ object CircuitBreakerSpec { } class CircuitBreakerSpec extends AkkaSpec(""" - akka.circuit-breaker { + pekko.circuit-breaker { identified { max-failures = 1 call-timeout = 100 ms diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/BalancingSpec.scala 
b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/BalancingSpec.scala index 0589b616d0..4f7f6b4e67 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/BalancingSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/BalancingSpec.scala @@ -61,8 +61,8 @@ object BalancingSpec { } class BalancingSpec extends AkkaSpec(""" - akka.loglevel=debug - akka.actor.deployment { + pekko.loglevel=debug + pekko.actor.deployment { /balancingPool-2 { router = balancing-pool nr-of-instances = 5 diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConfiguredLocalRoutingSpec.scala index f5b7144945..3f26b2e124 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConfiguredLocalRoutingSpec.scala @@ -26,7 +26,7 @@ import pekko.testkit.TestProbe object ConfiguredLocalRoutingSpec { val config = """ - akka { + pekko { actor { default-dispatcher { executor = "thread-pool-executor" diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConsistentHashingRouterSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConsistentHashingRouterSpec.scala index a0b787d857..7f815fb4bc 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConsistentHashingRouterSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ConsistentHashingRouterSpec.scala @@ -21,11 +21,11 @@ import pekko.testkit.AkkaSpec object ConsistentHashingRouterSpec { val config = """ - akka.actor { + pekko.actor { # consistent hashing is serializing the hash key, unless it's bytes or string allow-java-serialization = on } - akka.actor.deployment { + pekko.actor.deployment { /router1 { router = consistent-hashing-pool nr-of-instances = 3 diff --git 
a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ResizerSpec.scala index 6862070128..4eee4bade4 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/ResizerSpec.scala @@ -19,7 +19,7 @@ import pekko.testkit.TestEvent._ object ResizerSpec { val config = """ - akka.actor.deployment { + pekko.actor.deployment { /router1 { router = round-robin-pool resizer { @@ -53,7 +53,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with "Resizer fromConfig" must { def parseCfg(cfgString: String): Config = { val referenceCfg = ConfigFactory.defaultReference(ActorSystem.findClassLoader()) - ConfigFactory.parseString(cfgString).withFallback(referenceCfg.getConfig("akka.actor.deployment.default")) + ConfigFactory.parseString(cfgString).withFallback(referenceCfg.getConfig("pekko.actor.deployment.default")) } "load DefaultResizer from config when resizer is enabled" in { diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/RoutingSpec.scala index 0fa506f52f..0a4bd1e0eb 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/routing/RoutingSpec.scala @@ -21,7 +21,7 @@ import pekko.testkit._ object RoutingSpec { val config = """ - akka.actor.deployment { + pekko.actor.deployment { /router1 { router = round-robin-pool nr-of-instances = 3 @@ -239,7 +239,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with val sys = ActorSystem( "FromConfig", ConfigFactory - .parseString("akka.actor.deployment./routed.router=round-robin-pool") + .parseString("pekko.actor.deployment./routed.router=round-robin-pool") .withFallback(system.settings.config)) try { 
sys.actorOf(FromConfig.props(routeeProps = Props[TestActor]()), "routed") diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/AsyncSerializeSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/AsyncSerializeSpec.scala index 961b78c909..bedeb9fd59 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/AsyncSerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/AsyncSerializeSpec.scala @@ -20,7 +20,7 @@ object AsyncSerializeSpec { case class Message4(str: String) val config = ConfigFactory.parseString(s""" - akka { + pekko { actor { serializers { async = "org.apache.pekko.serialization.AsyncSerializeSpec$$TestAsyncSerializer" diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/DisabledJavaSerializerWarningSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/DisabledJavaSerializerWarningSpec.scala index 0fb681b914..9bdd8a64df 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/DisabledJavaSerializerWarningSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/DisabledJavaSerializerWarningSpec.scala @@ -17,7 +17,7 @@ object DisabledJavaSerializerWarningSpec { } class DisabledJavaSerializerWarningSpec extends AkkaSpec(""" - akka.actor { + pekko.actor { allow-java-serialization = off serialize-messages = on no-serialization-verification-needed-class-prefix = [] diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializationSetupSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializationSetupSpec.scala index 174b0467da..61f7b2bde4 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializationSetupSpec.scala +++ b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializationSetupSpec.scala @@ -55,7 +55,7 @@ object SerializationSetupSpec { val bootstrapSettings = BootstrapSetup( 
None, Some(ConfigFactory.parseString(""" - akka { + pekko { actor { allow-java-serialization = on @@ -74,7 +74,7 @@ object SerializationSetupSpec { val noJavaSerializationSystem = ActorSystem( "SerializationSettingsSpec" + "NoJavaSerialization", ConfigFactory.parseString(""" - akka { + pekko { actor { allow-java-serialization = off # this is by default on, but tests are running with off, use defaults here @@ -114,9 +114,9 @@ class SerializationSetupSpec "fail during ActorSystem creation when misconfigured" in { val config = ConfigFactory.parseString(""" - akka.loglevel = OFF - akka.stdout-loglevel = OFF - akka.actor.serializers.doe = "john.is.not.here" + pekko.loglevel = OFF + pekko.stdout-loglevel = OFF + pekko.actor.serializers.doe = "john.is.not.here" """).withFallback(ConfigFactory.load()) a[ClassNotFoundException] should be thrownBy { @@ -142,7 +142,7 @@ class SerializationSetupSpec val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup( None, Some(ConfigFactory.parseString(""" - akka { + pekko { loglevel = debug actor { allow-java-serialization = off @@ -163,7 +163,7 @@ class SerializationSetupSpec "throw if passed system to JavaSerializer has allow-java-serialization = off" in { intercept[DisabledJavaSerializer.JavaSerializationException] { new JavaSerializer(noJavaSerializationSystem.asInstanceOf[ExtendedActorSystem]) - }.getMessage should include("akka.actor.allow-java-serialization = off") + }.getMessage should include("pekko.actor.allow-java-serialization = off") intercept[DisabledJavaSerializer.JavaSerializationException] { SerializationExtension(addedJavaSerializationViaSettingsSystem) diff --git a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializeSpec.scala index fd2b9848ea..3f3bd8a065 100644 --- a/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializeSpec.scala +++ 
b/akka-actor-tests/src/test/scala/org/apache/pekko/serialization/SerializeSpec.scala @@ -28,7 +28,7 @@ import pekko.util.ByteString object SerializationTests { val serializeConf = s""" - akka { + pekko { actor { serializers { test = "org.apache.pekko.serialization.NoopSerializer" @@ -92,7 +92,7 @@ object SerializationTests { } val verifySerializabilityConf = """ - akka { + pekko { actor { serialize-messages = on serialize-creators = on @@ -128,8 +128,8 @@ object SerializationTests { val referenceConf = ConfigFactory.defaultReference() val conf = ConfigFactory .parseString(""" - akka.actor.warn-about-java-serializer-usage = on - akka.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = on + pekko.actor.allow-java-serialization = on """) .withFallback(ConfigFactory.parseString(serializeConf)) .withFallback(AkkaSpec.testConf.withFallback(referenceConf)) @@ -137,7 +137,7 @@ object SerializationTests { } val systemMessageMultiSerializerConf = """ - akka { + pekko { actor { serializers { test = "org.apache.pekko.serialization.NoopSerializer" @@ -277,7 +277,7 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { val sys = ActorSystem( "SerializeSpec", ConfigFactory.parseString(s""" - akka { + pekko { actor { serializers { test = "org.apache.pekko.serialization.NoopSerializer" @@ -504,9 +504,9 @@ class AllowJavaSerializationSpec extends AkkaSpec(SerializationTests.allowJavaSe class NoVerificationWarningSpec extends AkkaSpec(ConfigFactory.parseString(""" - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = on - akka.actor.warn-on-no-serialization-verification = on + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = on + pekko.actor.warn-on-no-serialization-verification = on """)) { val ser = SerializationExtension(system) @@ -530,9 +530,9 @@ class NoVerificationWarningSpec class NoVerificationWarningOffSpec extends 
AkkaSpec(ConfigFactory.parseString(""" - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = on - akka.actor.warn-on-no-serialization-verification = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = on + pekko.actor.warn-on-no-serialization-verification = off """)) { val ser = SerializationExtension(system) @@ -563,7 +563,7 @@ class SerializerDeadlockSpec extends AkkaSpec { val sys = ActorSystem( "SerializerDeadlockSpec", ConfigFactory.parseString(""" - akka { + pekko { actor { creation-timeout = 1s serializers { diff --git a/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/BubblingSampleTest.java b/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/BubblingSampleTest.java index 41fcd70ae6..36a343e17e 100644 --- a/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/BubblingSampleTest.java +++ b/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/BubblingSampleTest.java @@ -22,7 +22,7 @@ public class BubblingSampleTest extends JUnitSuite { @ClassRule public static final TestKitJunitResource testKit = - new TestKitJunitResource("akka.loglevel = off"); + new TestKitJunitResource("pekko.loglevel = off"); @Rule public final LogCapturing logCapturing = new LogCapturing(); diff --git a/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/IntroTest.java b/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/IntroTest.java index ad3945b931..a25bf7bd7d 100644 --- a/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/IntroTest.java +++ b/akka-actor-typed-tests/src/test/java/jdocs/org/apache/pekko/typed/IntroTest.java @@ -170,7 +170,7 @@ public interface IntroTest { private HelloWorldMain(ActorContext context) { super(context); - final String dispatcherPath = "akka.actor.default-blocking-io-dispatcher"; + final String dispatcherPath = "pekko.actor.default-blocking-io-dispatcher"; Props 
greeterProps = DispatcherSelector.fromConfig(dispatcherPath); greeter = getContext().spawn(HelloWorld.create(), "greeter", greeterProps); } diff --git a/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/ExtensionsTest.java b/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/ExtensionsTest.java index 39046694df..9d8c03cf93 100644 --- a/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/ExtensionsTest.java +++ b/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/ExtensionsTest.java @@ -52,7 +52,7 @@ public class ExtensionsTest extends JUnitSuite { public void loadJavaExtensionsFromConfig() { Config cfg = ConfigFactory.parseString( - "akka.actor.typed.extensions += \"org.apache.pekko.actor.typed.ExtensionsTest$MyExtension\"") + "pekko.actor.typed.extensions += \"org.apache.pekko.actor.typed.ExtensionsTest$MyExtension\"") .resolve(); final ActorSystem system = ActorSystem.create(Behaviors.empty(), "loadJavaExtensionsFromConfig", cfg); diff --git a/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/javadsl/ActorLoggingTest.java b/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/javadsl/ActorLoggingTest.java index e8e9ebd76c..f3e0f2a0d3 100644 --- a/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/javadsl/ActorLoggingTest.java +++ b/akka-actor-typed-tests/src/test/java/org/apache/pekko/actor/typed/javadsl/ActorLoggingTest.java @@ -28,8 +28,8 @@ public class ActorLoggingTest extends JUnitSuite { public static final TestKitJunitResource testKit = new TestKitJunitResource( ConfigFactory.parseString( - "akka.loglevel = INFO\n" - + "akka.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]")); + "pekko.loglevel = INFO\n" + + "pekko.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]")); interface Protocol { String getTransactionId(); diff --git a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/DispatchersDocSpec.scala 
b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/DispatchersDocSpec.scala index 6e204368bf..015e1db297 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/DispatchersDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/DispatchersDocSpec.scala @@ -71,12 +71,12 @@ class DispatchersDocSpec val withDefault: Future[ActorRef[WhichDispatcher]] = actor.ask(Spawn(giveMeYourDispatcher, "default", Props.empty, _)) withDefault.futureValue ! WhichDispatcher(probe.ref) - probe.receiveMessage().id shouldEqual "akka.actor.default-dispatcher" + probe.receiveMessage().id shouldEqual "pekko.actor.default-dispatcher" val withBlocking: Future[ActorRef[WhichDispatcher]] = actor.ask(Spawn(giveMeYourDispatcher, "default", DispatcherSelector.blocking(), _)) withBlocking.futureValue ! WhichDispatcher(probe.ref) - probe.receiveMessage().id shouldEqual "akka.actor.default-blocking-io-dispatcher" + probe.receiveMessage().id shouldEqual "pekko.actor.default-blocking-io-dispatcher" val withCustom: Future[ActorRef[WhichDispatcher]] = actor.ask(Spawn(giveMeYourDispatcher, "default", DispatcherSelector.fromConfig("your-dispatcher"), _)) diff --git a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/FaultToleranceDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/FaultToleranceDocSpec.scala index 7c416f45cc..4de0468e8a 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/FaultToleranceDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/FaultToleranceDocSpec.scala @@ -81,7 +81,7 @@ object FaultToleranceDocSpec { @nowarn("msg=never used") class FaultToleranceDocSpec extends ScalaTestWithActorTestKit(""" # silenced to not put noise in test logs - akka.loglevel = off + pekko.loglevel = off """) with AnyWordSpecLike { import FaultToleranceDocSpec._ diff --git 
a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/IntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/IntroSpec.scala index 8fae32e43a..8390397d5c 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/IntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/IntroSpec.scala @@ -113,7 +113,7 @@ object IntroSpec { // #hello-world-main-with-dispatchers def apply(): Behavior[SayHello] = Behaviors.setup { context => - val dispatcherPath = "akka.actor.default-blocking-io-dispatcher" + val dispatcherPath = "pekko.actor.default-blocking-io-dispatcher" val props = DispatcherSelector.fromConfig(dispatcherPath) val greeter = context.spawn(HelloWorld(), "greeter", props) diff --git a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/RouterSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/RouterSpec.scala index 21b8ce777b..8a275332e6 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/RouterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/RouterSpec.scala @@ -49,7 +49,7 @@ object RouterSpec { // format: ON } -class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with AnyWordSpecLike with LogCapturing { +class RouterSpec extends ScalaTestWithActorTestKit("pekko.loglevel=warning") with AnyWordSpecLike with LogCapturing { import RouterSpec._ "The routing sample" must { diff --git a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/extensions/ExtensionDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/extensions/ExtensionDocSpec.scala index ead888ab0b..56841711b1 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/extensions/ExtensionDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/extensions/ExtensionDocSpec.scala @@ -46,7 +46,7 @@ class 
DatabasePool(system: ActorSystem[_]) extends Extension { object ExtensionDocSpec { val config = ConfigFactory.parseString(""" #config - akka.actor.typed.extensions = ["org.apache.pekko.akka.extensions.DatabasePool"] + pekko.actor.typed.extensions = ["org.apache.pekko.extensions.DatabasePool"] #config """) diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/AskSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/AskSpec.scala index 1743013452..7f9762842d 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/AskSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/AskSpec.scala @@ -30,8 +30,8 @@ object AskSpec { } class AskSpec extends ScalaTestWithActorTestKit(""" - akka.loglevel=DEBUG - akka.actor.debug.event-stream = on + pekko.loglevel=DEBUG + pekko.actor.debug.event-stream = on """) with AnyWordSpecLike with LogCapturing { import AskSpec._ diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/ExtensionsSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/ExtensionsSpec.scala index 3035ef42a6..4e1e29779b 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/ExtensionsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/ExtensionsSpec.scala @@ -69,7 +69,7 @@ class AccessSystemFromConstructor(system: ActorSystem[_]) extends Extension { object ExtensionsSpec { val config = ConfigFactory.parseString(""" -akka.actor.typed { +pekko.actor.typed { library-extensions += "org.apache.pekko.actor.typed.InstanceCountingExtension" } """).resolve() @@ -115,7 +115,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with Some( ConfigFactory.parseString( """ - akka.actor.typed.extensions = ["org.apache.pekko.actor.typed.DummyExtension1$", "org.apache.pekko.actor.typed.SlowExtension$"] + pekko.actor.typed.extensions = 
["org.apache.pekko.actor.typed.DummyExtension1$", "org.apache.pekko.actor.typed.SlowExtension$"] """))) { sys => sys.hasExtension(DummyExtension1) should ===(true) sys.extension(DummyExtension1) shouldBe a[DummyExtension1] @@ -130,7 +130,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with Behaviors.empty[Any], "ExtensionsSpec04", ConfigFactory.parseString(""" - akka.actor.typed.extensions = ["org.apache.pekko.actor.typed.FailingToLoadExtension$"] + pekko.actor.typed.extensions = ["org.apache.pekko.actor.typed.FailingToLoadExtension$"] """)) } @@ -155,7 +155,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "allow for auto-loading of library-extensions" in withEmptyActorSystem("ExtensionsSpec06") { sys => - val listedExtensions = sys.settings.config.getStringList("akka.actor.typed.library-extensions") + val listedExtensions = sys.settings.config.getStringList("pekko.actor.typed.library-extensions") listedExtensions.size should be > 0 // could be initialized by other tests, so at least once InstanceCountingExtension.createCount.get() should be > 0 @@ -167,7 +167,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "ExtensionsSpec07", Some( ConfigFactory.parseString( - """akka.actor.typed.library-extensions += "org.apache.pekko.actor.typed.FailingToLoadExtension$""""))) { + """pekko.actor.typed.library-extensions += "org.apache.pekko.actor.typed.FailingToLoadExtension$""""))) { _ => () } @@ -178,7 +178,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with withEmptyActorSystem( "ExtensionsSpec08", Some(ConfigFactory.parseString( - """akka.actor.typed.library-extensions += "org.apache.pekko.actor.typed.MissingExtension""""))) { _ => + """pekko.actor.typed.library-extensions += "org.apache.pekko.actor.typed.MissingExtension""""))) { _ => () } } @@ -230,7 +230,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike 
with Some( ConfigFactory.parseString( """ - akka.actor.typed.extensions = ["org.apache.pekko.actor.typed.DummyExtension1$", "org.apache.pekko.actor.typed.SlowExtension$"] + pekko.actor.typed.extensions = ["org.apache.pekko.actor.typed.DummyExtension1$", "org.apache.pekko.actor.typed.SlowExtension$"] """)), Some(ActorSystemSetup(new DummyExtension1Setup(_ => new DummyExtension1ViaSetup)))) { sys => sys.hasExtension(DummyExtension1) should ===(true) @@ -248,7 +248,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with Some( ConfigFactory.parseString( """ - akka.actor.typed.extensions = ["org.apache.pekko.actor.typed.AccessSystemFromConstructorExtensionId$"] + pekko.actor.typed.extensions = ["org.apache.pekko.actor.typed.AccessSystemFromConstructorExtensionId$"] """)), None) { sys => AccessSystemFromConstructorExtensionId(sys) // would throw if it couldn't diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LocalActorRefProviderLogMessagesSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LocalActorRefProviderLogMessagesSpec.scala index e60b1abb8c..3e7b343e16 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LocalActorRefProviderLogMessagesSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LocalActorRefProviderLogMessagesSpec.scala @@ -11,7 +11,7 @@ import org.scalatest.wordspec.AnyWordSpecLike object LocalActorRefProviderLogMessagesSpec { val config = """ - akka { + pekko { loglevel = DEBUG # test verifies debug log-dead-letters = on actor { diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LogMessagesSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LogMessagesSpec.scala index e48d939a2d..6be3de231a 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LogMessagesSpec.scala +++ 
b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/LogMessagesSpec.scala @@ -16,7 +16,7 @@ import pekko.actor.typed.scaladsl.Behaviors import pekko.actor.typed.scaladsl.adapter._ class LogMessagesSpec extends ScalaTestWithActorTestKit(""" - akka.loglevel = DEBUG # test verifies debug + pekko.loglevel = DEBUG # test verifies debug """) with AnyWordSpecLike with LogCapturing { implicit val classic: actor.ActorSystem = system.toClassic diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/SupervisionSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/SupervisionSpec.scala index 5ff088d619..47f6df1579 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/SupervisionSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/SupervisionSpec.scala @@ -261,7 +261,7 @@ class StubbedSupervisionSpec extends AnyWordSpec with Matchers with LogCapturing } class SupervisionSpec extends ScalaTestWithActorTestKit(""" - akka.log-dead-letters = off + pekko.log-dead-letters = off """) with AnyWordSpecLike with LogCapturing { import BehaviorInterceptor._ diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala index ffeb1ed47d..787179faf8 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala @@ -55,7 +55,7 @@ class ClassicSupervisingTypedSpec extends AnyWordSpecLike with LogCapturing with val classicSystem = pekko.actor.ActorSystem( "ClassicSupervisingTypedSpec", ConfigFactory.parseString(""" - akka.actor.testkit.typed.expect-no-message-default = 50 ms + 
pekko.actor.testkit.typed.expect-no-message-default = 50 ms """)) val classicTestKit = new pekko.testkit.TestKit(classicSystem) implicit val classicSender: u.ActorRef = classicTestKit.testActor diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/TypedSupervisingClassicSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/TypedSupervisingClassicSpec.scala index 22744d1391..740f0557b1 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/TypedSupervisingClassicSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/coexistence/TypedSupervisingClassicSpec.scala @@ -44,7 +44,7 @@ object TypedSupervisingClassicSpec { } class TypedSupervisingClassicSpec extends ScalaTestWithActorTestKit(""" - akka.loglevel = INFO + pekko.loglevel = INFO """.stripMargin) with AnyWordSpecLike with LogCapturing { import TypedSupervisingClassicSpec._ diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ConsumerControllerSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ConsumerControllerSpec.scala index 48517f326c..b6a3baa0ba 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ConsumerControllerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ConsumerControllerSpec.scala @@ -18,7 +18,7 @@ import pekko.serialization.SerializationExtension class ConsumerControllerSpec extends ScalaTestWithActorTestKit(ConfigFactory.parseString(""" - akka.reliable-delivery.consumer-controller { + pekko.reliable-delivery.consumer-controller { flow-control-window = 20 resend-interval-min = 1s } diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableProducerControllerSpec.scala 
b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableProducerControllerSpec.scala index e9ffc41f39..4340fdbc58 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableProducerControllerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableProducerControllerSpec.scala @@ -23,8 +23,8 @@ import pekko.util.ByteString class DurableProducerControllerSpec extends ScalaTestWithActorTestKit( ConfigFactory.parseString(""" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 - akka.reliable-delivery.consumer-controller.resend-interval-min = 1s + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.resend-interval-min = 1s """).withFallback(TestSerializer.config)) with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableWorkPullingSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableWorkPullingSpec.scala index 1decb1769f..580a8f2ef1 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableWorkPullingSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/DurableWorkPullingSpec.scala @@ -22,7 +22,7 @@ import pekko.actor.typed.receptionist.ServiceKey class DurableWorkPullingSpec extends ScalaTestWithActorTestKit(""" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ProducerControllerSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ProducerControllerSpec.scala index 6bec8535da..74a9c063df 100644 --- 
a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ProducerControllerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ProducerControllerSpec.scala @@ -19,7 +19,7 @@ import pekko.actor.typed.delivery.internal.ProducerControllerImpl class ProducerControllerSpec extends ScalaTestWithActorTestKit( ConfigFactory.parseString(""" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """).withFallback(TestSerializer.config)) with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryRandomSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryRandomSpec.scala index 03ea79aedc..98039c74ac 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryRandomSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryRandomSpec.scala @@ -25,7 +25,7 @@ import pekko.actor.typed.scaladsl.LoggerOps object ReliableDeliveryRandomSpec { val config: Config = ConfigFactory.parseString(""" - akka.reliable-delivery.consumer-controller { + pekko.reliable-delivery.consumer-controller { flow-control-window = 20 resend-interval-min = 500 ms resend-interval-max = 2 s @@ -215,5 +215,5 @@ class ReliableDeliveryRandomSpec(config: Config) class ReliableDeliveryRandomChunkedSpec extends ReliableDeliveryRandomSpec( ConfigFactory.parseString(""" - akka.reliable-delivery.producer-controller.chunk-large-messages = 1b + pekko.reliable-delivery.producer-controller.chunk-large-messages = 1b """).withFallback(TestSerializer.config).withFallback(ReliableDeliveryRandomSpec.config)) diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliverySpec.scala 
b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliverySpec.scala index fa080f37f2..01db20f267 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliverySpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliverySpec.scala @@ -16,7 +16,7 @@ import pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit object ReliableDeliverySpec { val config: Config = ConfigFactory.parseString(""" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) } @@ -209,5 +209,5 @@ class ReliableDeliverySpec(config: Config) class ReliableDeliveryChunkedSpec extends ReliableDeliverySpec( ConfigFactory.parseString(""" - akka.reliable-delivery.producer-controller.chunk-large-messages = 1b + pekko.reliable-delivery.producer-controller.chunk-large-messages = 1b """).withFallback(TestSerializer.config).withFallback(ReliableDeliverySpec.config)) diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/TestConsumer.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/TestConsumer.scala index 4d948ad636..36497a7d4b 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/TestConsumer.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/TestConsumer.scala @@ -126,8 +126,8 @@ class TestConsumer( object TestSerializer { val config: Config = ConfigFactory.parseString(s""" - akka.actor.serializers.delivery-test = ${classOf[TestSerializer].getName} - akka.actor.serialization-bindings { + pekko.actor.serializers.delivery-test = ${classOf[TestSerializer].getName} + pekko.actor.serialization-bindings { "${classOf[TestConsumer.Job].getName}" = delivery-test } """) diff --git 
a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/WorkPullingSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/WorkPullingSpec.scala index 0994337fa4..656891cf8d 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/WorkPullingSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/delivery/WorkPullingSpec.scala @@ -19,7 +19,7 @@ import pekko.actor.typed.receptionist.ServiceKey class WorkPullingSpec extends ScalaTestWithActorTestKit(""" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorRefSerializationSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorRefSerializationSpec.scala index a333f9a586..d29fdcb528 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorRefSerializationSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorRefSerializationSpec.scala @@ -16,13 +16,13 @@ import pekko.serialization.{ JavaSerializer, SerializationExtension } object ActorRefSerializationSpec { def config = ConfigFactory.parseString(""" - akka.actor { + pekko.actor { # test is verifying Java serialization of ActorRef allow-java-serialization = on warn-about-java-serializer-usage = off } - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """) case class MessageWrappingActorRef(s: String, ref: ActorRef[Unit]) extends java.io.Serializable diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorSystemSpec.scala 
b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorSystemSpec.scala index af9f7d09db..ab4452256c 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorSystemSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/ActorSystemSpec.scala @@ -36,7 +36,7 @@ class ActorSystemSpec with Eventually with LogCapturing { - private val testKitSettings = TestKitSettings(ConfigFactory.load().getConfig("akka.actor.testkit.typed")) + private val testKitSettings = TestKitSettings(ConfigFactory.load().getConfig("pekko.actor.testkit.typed")) override implicit val patienceConfig: PatienceConfig = PatienceConfig(testKitSettings.SingleExpectDefaultTimeout, Span(100, org.scalatest.time.Millis)) def system[T](behavior: Behavior[T], name: String, props: Props = Props.empty) = diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/adpater/PropsAdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/adpater/PropsAdapterSpec.scala index 742ae6d5d5..f4c2cbc910 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/adpater/PropsAdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/adpater/PropsAdapterSpec.scala @@ -19,7 +19,7 @@ class PropsAdapterSpec extends AnyWordSpec with Matchers { "default to org.apache.pekko.dispatch.SingleConsumerOnlyUnboundedMailbox" in { val props: Props = Props.empty val pa: actor.Props = PropsAdapter(() => Behaviors.empty, props, rethrowTypedFailure = false) - pa.mailbox shouldEqual "akka.actor.typed.default-mailbox" + pa.mailbox shouldEqual "pekko.actor.typed.default-mailbox" } } } diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/routing/PoolRouterSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/routing/PoolRouterSpec.scala index 
1146349c0f..270b894be4 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/routing/PoolRouterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/internal/routing/PoolRouterSpec.scala @@ -37,7 +37,7 @@ class PoolRouterSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with pool ! WhichDispatcher(probe.ref) val response = probe.receiveMessage() - response should startWith("PoolRouterSpec-akka.actor.default-dispatcher") + response should startWith("PoolRouterSpec-pekko.actor.default-dispatcher") } "use the specified dispatcher for its routees" in { @@ -48,7 +48,7 @@ class PoolRouterSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with pool ! WhichDispatcher(probe.ref) val response = probe.receiveMessage() - response should startWith("PoolRouterSpec-akka.actor.default-blocking-io-dispatcher") + response should startWith("PoolRouterSpec-pekko.actor.default-blocking-io-dispatcher") } } } diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/ActorLoggingSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/ActorLoggingSpec.scala index cf26e27354..2091cba18a 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/ActorLoggingSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/ActorLoggingSpec.scala @@ -54,7 +54,7 @@ class BehaviorWhereTheLoggerIsUsed(context: ActorContext[String]) extends Abstra } class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" - akka.loglevel = DEBUG # test verifies debug + pekko.loglevel = DEBUG # test verifies debug """) with AnyWordSpecLike with LogCapturing { val marker = new BasicMarkerFactory().getMarker("marker") @@ -287,9 +287,9 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" import pekko.actor.typed.scaladsl.adapter._ "by default be amended to use Slf4jLogger" in { - 
system.settings.config.getStringList("akka.loggers").size() should ===(1) - system.settings.config.getStringList("akka.loggers").get(0) should ===(classOf[Slf4jLogger].getName) - system.settings.config.getString("akka.logging-filter") should ===(classOf[Slf4jLoggingFilter].getName) + system.settings.config.getStringList("pekko.loggers").size() should ===(1) + system.settings.config.getStringList("pekko.loggers").get(0) should ===(classOf[Slf4jLogger].getName) + system.settings.config.getString("pekko.logging-filter") should ===(classOf[Slf4jLoggingFilter].getName) system.toClassic.settings.Loggers should ===(List(classOf[Slf4jLogger].getName)) system.toClassic.settings.LoggingFilter should ===(classOf[Slf4jLoggingFilter].getName) @@ -298,9 +298,9 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" "by default be amended to use Slf4jLogger when starting classic ActorSystem" in { val classicSys = pekko.actor.ActorSystem(system.name) try { - classicSys.settings.config.getStringList("akka.loggers").size() should ===(1) - classicSys.settings.config.getStringList("akka.loggers").get(0) should ===(classOf[Slf4jLogger].getName) - classicSys.settings.config.getString("akka.logging-filter") should ===(classOf[Slf4jLoggingFilter].getName) + classicSys.settings.config.getStringList("pekko.loggers").size() should ===(1) + classicSys.settings.config.getStringList("pekko.loggers").get(0) should ===(classOf[Slf4jLogger].getName) + classicSys.settings.config.getString("pekko.logging-filter") should ===(classOf[Slf4jLoggingFilter].getName) classicSys.settings.Loggers should ===(List(classOf[Slf4jLogger].getName)) classicSys.settings.LoggingFilter should ===(classOf[Slf4jLoggingFilter].getName) @@ -313,11 +313,11 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" "not be amended when use-slf4j=off" in { val dynamicAccess = system.toClassic.asInstanceOf[ExtendedActorSystem].dynamicAccess val config = ClassicActorSystem.Settings.amendSlf4jConfig( - 
ConfigFactory.parseString("akka.use-slf4j = off").withFallback(ConfigFactory.defaultReference()), + ConfigFactory.parseString("pekko.use-slf4j = off").withFallback(ConfigFactory.defaultReference()), dynamicAccess) - config.getStringList("akka.loggers").size() should ===(1) - config.getStringList("akka.loggers").get(0) should ===(classOf[DefaultLogger].getName) - config.getString("akka.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) + config.getStringList("pekko.loggers").size() should ===(1) + config.getStringList("pekko.loggers").get(0) should ===(classOf[DefaultLogger].getName) + config.getString("pekko.logging-filter") should ===(classOf[DefaultLoggingFilter].getName) } } diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/DispatcherSelectorSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/DispatcherSelectorSpec.scala index 75e6357171..1395f0f335 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/DispatcherSelectorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/DispatcherSelectorSpec.scala @@ -116,7 +116,7 @@ class DispatcherSelectorSpec(config: Config) sys ! 
Ping(probe.ref) val response = probe.receiveMessage() - response.threadName should startWith("DispatcherSelectorSpec2-akka.actor.default-dispatcher") + response.threadName should startWith("DispatcherSelectorSpec2-pekko.actor.default-dispatcher") } finally { ActorTestKit.shutdown(sys) } diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/MessageAdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/MessageAdapterSpec.scala index 70b0f90f0b..3e188cd0ad 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/MessageAdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/MessageAdapterSpec.scala @@ -24,7 +24,7 @@ import pekko.actor.typed.internal.AdaptMessage object MessageAdapterSpec { val config = ConfigFactory.parseString(""" - akka.log-dead-letters = on + pekko.log-dead-letters = on ping-pong-dispatcher { executor = thread-pool-executor type = PinnedDispatcher diff --git a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/RoutersSpec.scala b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/RoutersSpec.scala index 9f38e85639..43f93aedb5 100644 --- a/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/RoutersSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/org/apache/pekko/actor/typed/scaladsl/RoutersSpec.scala @@ -20,7 +20,7 @@ import pekko.actor.typed.receptionist.ServiceKey import pekko.actor.typed.scaladsl.adapter._ class RoutersSpec extends ScalaTestWithActorTestKit(""" - akka.loglevel=debug + pekko.loglevel=debug """) with AnyWordSpecLike with Matchers with LogCapturing { // needed for the event filter diff --git a/akka-actor-typed/src/main/resources/reference.conf b/akka-actor-typed/src/main/resources/reference.conf index 044330b8e0..4afc8bb7d3 100644 --- a/akka-actor-typed/src/main/resources/reference.conf +++ 
b/akka-actor-typed/src/main/resources/reference.conf @@ -1,8 +1,8 @@ -akka.actor.typed { +pekko.actor.typed { # List FQCN of `org.apache.pekko.actor.typed.ExtensionId`s which shall be loaded at actor system startup. # Should be on the format: 'extensions = ["com.example.MyExtId1", "com.example.MyExtId2"]' etc. - # See the Akka Documentation for more info about Extensions + # See the Pekko Documentation for more info about Extensions extensions = [] # List FQCN of extensions which shall be loaded at actor system startup. @@ -13,7 +13,7 @@ akka.actor.typed { # # Should not be set by end user applications in 'application.conf', use the extensions property for that # - library-extensions = ${?akka.actor.typed.library-extensions} [] + library-extensions = ${?pekko.actor.typed.library-extensions} [] # Receptionist is started eagerly to allow clustered receptionist to gather remote registrations early on. library-extensions += "org.apache.pekko.actor.typed.receptionist.Receptionist$" @@ -31,9 +31,9 @@ akka.actor.typed { } # Load typed extensions by a classic extension. -akka.library-extensions += "org.apache.pekko.actor.typed.internal.adapter.ActorSystemAdapter$LoadTypedExtensions" +pekko.library-extensions += "org.apache.pekko.actor.typed.internal.adapter.ActorSystemAdapter$LoadTypedExtensions" -akka.actor { +pekko.actor { serializers { typed-misc = "org.apache.pekko.actor.typed.internal.MiscMessageSerializer" service-key = "org.apache.pekko.actor.typed.internal.receptionist.ServiceKeySerializer" @@ -51,21 +51,21 @@ akka.actor { } } -# When using Akka Typed (having akka-actor-typed in classpath) the +# When using Pekko Typed (having pekko-actor-typed in classpath) the # org.apache.pekko.event.slf4j.Slf4jLogger is enabled instead of the DefaultLogger -# even though it has not been explicitly defined in `akka.loggers` +# even though it has not been explicitly defined in `pekko.loggers` # configuration. 
# -# Slf4jLogger will be used for all Akka classic logging via eventStream, -# including logging from Akka internals. The Slf4jLogger is then using +# Slf4jLogger will be used for all Pekko classic logging via eventStream, +# including logging from Pekko internals. The Slf4jLogger is then using # an ordinary org.slf4j.Logger to emit the log events. # # The Slf4jLoggingFilter is also enabled automatically. # # This behavior can be disabled by setting this property to `off`. -akka.use-slf4j = on +pekko.use-slf4j = on -akka.reliable-delivery { +pekko.reliable-delivery { producer-controller { # To avoid head of line blocking from serialization and transfer @@ -112,7 +112,7 @@ akka.reliable-delivery { } work-pulling { - producer-controller = ${akka.reliable-delivery.producer-controller} + producer-controller = ${pekko.reliable-delivery.producer-controller} producer-controller { # Limit of how many messages that can be buffered when there # is no demand from the consumer side. @@ -122,7 +122,7 @@ akka.reliable-delivery { internal-ask-timeout = 60s # Chunked messages not implemented for work-pulling yet. Override to not - # propagate property from akka.reliable-delivery.producer-controller. + # propagate property from pekko.reliable-delivery.producer-controller. chunk-large-messages = off } } diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorSystem.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorSystem.scala index 87413c1a90..f365fce80a 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorSystem.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorSystem.scala @@ -96,7 +96,7 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA def dispatchers: Dispatchers /** - * The default thread pool of this ActorSystem, configured with settings in `akka.actor.default-dispatcher`. 
+ * The default thread pool of this ActorSystem, configured with settings in `pekko.actor.default-dispatcher`. */ implicit def executionContext: ExecutionContextExecutor @@ -104,7 +104,7 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA * Terminates this actor system by running [[pekko.actor.CoordinatedShutdown]] with reason * [[pekko.actor.CoordinatedShutdown.ActorSystemTerminateReason]]. * - * If `akka.coordinated-shutdown.run-by-actor-system-terminate` is configured to `off` + * If `pekko.coordinated-shutdown.run-by-actor-system-terminate` is configured to `off` * it will not run `CoordinatedShutdown`, but the `ActorSystem` and its actors * will still be terminated. * @@ -317,7 +317,7 @@ final class Settings(val config: Config, val classicSettings: classic.ActorSyste */ override def toString: String = config.root.render - private val typedConfig = config.getConfig("akka.actor.typed") + private val typedConfig = config.getConfig("pekko.actor.typed") val RestartStashCapacity: Int = typedConfig.getInt("restart-stash-capacity").requiring(_ >= 0, "restart-stash-capacity must be >= 0") diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Dispatchers.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Dispatchers.scala index eb28f67e53..1f5ac9b9bd 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Dispatchers.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Dispatchers.scala @@ -14,12 +14,12 @@ object Dispatchers { * The id of the default dispatcher, also the full key of the * configuration of the default dispatcher. 
*/ - final val DefaultDispatcherId = "akka.actor.default-dispatcher" + final val DefaultDispatcherId = "pekko.actor.default-dispatcher" /** * INTERNAL API */ - @InternalApi final val InternalDispatcherId = "akka.actor.internal-dispatcher" + @InternalApi final val InternalDispatcherId = "pekko.actor.internal-dispatcher" } /** diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Extensions.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Extensions.scala index 6a8f42eff3..17b49c4ad2 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Extensions.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Extensions.scala @@ -47,7 +47,7 @@ trait Extension * // can be loaded eagerly on system startup through configuration * // note that the name is the JVM/Java class name, with a dollar sign in the end * // and not the Scala object name - * akka.actor.typed.extensions = ["com.example.MyExt$"] + * pekko.actor.typed.extensions = ["com.example.MyExt$"] * * // Allows access like this from Scala * MyExt().someMethodOnTheExtension() @@ -58,7 +58,7 @@ trait Extension * *Java API* * * To implement an extension in Java you should first create an `ExtensionId` singleton by implementing a static method - * called `getInstance`, this is needed to be able to list the extension among the `akka.actor.typed.extensions` in the configuration + * called `getInstance`, this is needed to be able to list the extension among the `pekko.actor.typed.extensions` in the configuration * and have it loaded when the actor system starts up. 
* * {{{ @@ -91,7 +91,7 @@ trait Extension * } * * // can be loaded eagerly on system startup through configuration - * akka.actor.typed.extensions = ["com.example.MyExt"] + * pekko.actor.typed.extensions = ["com.example.MyExt"] * * // Allows access like this from Scala * MyExt.someMethodOnTheExtension() diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Props.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Props.scala index 9ef5f67c02..bcc05427f0 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Props.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/Props.scala @@ -166,7 +166,7 @@ object DispatcherSelector { * Run the actor on the default blocking dispatcher that is * configured under default-blocking-io-dispatcher */ - def blocking(): DispatcherSelector = fromConfig("akka.actor.default-blocking-io-dispatcher") + def blocking(): DispatcherSelector = fromConfig("pekko.actor.default-blocking-io-dispatcher") /** * Look up an executor definition in the [[ActorSystem]] configuration. 
@@ -193,7 +193,7 @@ object MailboxSelector { /** * Scala API: The default mailbox is SingleConsumerOnlyUnboundedMailbox */ - def default(): MailboxSelector = fromConfig("akka.actor.typed.default-mailbox") + def default(): MailboxSelector = fromConfig("pekko.actor.typed.default-mailbox") /** * Java API: The default mailbox is SingleConsumerOnlyUnboundedMailbox diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/SupervisorStrategy.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/SupervisorStrategy.scala index 858c58dff5..d7d174836f 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/SupervisorStrategy.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/SupervisorStrategy.scala @@ -284,7 +284,7 @@ sealed abstract class RestartSupervisorStrategy extends SupervisorStrategy { * the capacity in number of messages of the stash buffer. If the capacity is exceed * then additional incoming messages are dropped. * - * By default the capacity is defined by config property `akka.actor.typed.restart-stash-capacity`. + * By default the capacity is defined by config property `pekko.actor.typed.restart-stash-capacity`. */ def withStashCapacity(capacity: Int): RestartSupervisorStrategy @@ -336,7 +336,7 @@ sealed abstract class BackoffSupervisorStrategy extends SupervisorStrategy { * behavior. This property defines the capacity in number of messages of the stash * buffer. If the capacity is exceed then additional incoming messages are dropped. * - * By default the capacity is defined by config property `akka.actor.typed.restart-stash-capacity`. + * By default the capacity is defined by config property `pekko.actor.typed.restart-stash-capacity`. 
*/ def withStashCapacity(capacity: Int): BackoffSupervisorStrategy diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ConsumerController.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ConsumerController.scala index 639bf570c1..157c0dacdd 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ConsumerController.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ConsumerController.scala @@ -192,15 +192,15 @@ object ConsumerController { object Settings { /** - * Scala API: Factory method from config `akka.reliable-delivery.consumer-controller` + * Scala API: Factory method from config `pekko.reliable-delivery.consumer-controller` * of the `ActorSystem`. */ def apply(system: ActorSystem[_]): Settings = - apply(system.settings.config.getConfig("akka.reliable-delivery.consumer-controller")) + apply(system.settings.config.getConfig("pekko.reliable-delivery.consumer-controller")) /** * Scala API: Factory method from Config corresponding to - * `akka.reliable-delivery.consumer-controller`. + * `pekko.reliable-delivery.consumer-controller`. */ def apply(config: Config): Settings = { new Settings( @@ -211,7 +211,7 @@ object ConsumerController { } /** - * Java API: Factory method from config `akka.reliable-delivery.producer-controller` + * Java API: Factory method from config `pekko.reliable-delivery.producer-controller` * of the `ActorSystem`. */ def create(system: ActorSystem[_]): Settings = @@ -219,7 +219,7 @@ object ConsumerController { /** * Java API: Factory method from Config corresponding to - * `akka.reliable-delivery.producer-controller`. + * `pekko.reliable-delivery.producer-controller`. 
*/ def create(config: Config): Settings = apply(config) diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ProducerController.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ProducerController.scala index 088a86fd3e..0f54c1907d 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ProducerController.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/ProducerController.scala @@ -145,15 +145,15 @@ object ProducerController { object Settings { /** - * Scala API: Factory method from config `akka.reliable-delivery.producer-controller` + * Scala API: Factory method from config `pekko.reliable-delivery.producer-controller` * of the `ActorSystem`. */ def apply(system: ActorSystem[_]): Settings = - apply(system.settings.config.getConfig("akka.reliable-delivery.producer-controller")) + apply(system.settings.config.getConfig("pekko.reliable-delivery.producer-controller")) /** * Scala API: Factory method from Config corresponding to - * `akka.reliable-delivery.producer-controller`. + * `pekko.reliable-delivery.producer-controller`. */ def apply(config: Config): Settings = { val chunkLargeMessagesBytes = toRootLowerCase(config.getString("chunk-large-messages")) match { @@ -169,7 +169,7 @@ object ProducerController { } /** - * Java API: Factory method from config `akka.reliable-delivery.producer-controller` + * Java API: Factory method from config `pekko.reliable-delivery.producer-controller` * of the `ActorSystem`. */ def create(system: ActorSystem[_]): Settings = @@ -177,7 +177,7 @@ object ProducerController { /** * Java API: Factory method from Config corresponding to - * `akka.reliable-delivery.producer-controller`. + * `pekko.reliable-delivery.producer-controller`. 
*/ def create(config: Config): Settings = apply(config) diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala index cc6198df2d..651df7e230 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala @@ -142,15 +142,15 @@ object WorkPullingProducerController { object Settings { /** - * Scala API: Factory method from config `akka.reliable-delivery.work-pulling.producer-controller` + * Scala API: Factory method from config `pekko.reliable-delivery.work-pulling.producer-controller` * of the `ActorSystem`. */ def apply(system: ActorSystem[_]): Settings = - apply(system.settings.config.getConfig("akka.reliable-delivery.work-pulling.producer-controller")) + apply(system.settings.config.getConfig("pekko.reliable-delivery.work-pulling.producer-controller")) /** * Scala API: Factory method from Config corresponding to - * `akka.reliable-delivery.work-pulling.producer-controller`. + * `pekko.reliable-delivery.work-pulling.producer-controller`. */ def apply(config: Config): Settings = { new Settings( @@ -160,7 +160,7 @@ object WorkPullingProducerController { } /** - * Java API: Factory method from config `akka.reliable-delivery.work-pulling.producer-controller` + * Java API: Factory method from config `pekko.reliable-delivery.work-pulling.producer-controller` * of the `ActorSystem`. */ def create(system: ActorSystem[_]): Settings = @@ -168,7 +168,7 @@ object WorkPullingProducerController { /** * Java API: Factory method from Config corresponding to - * `akka.reliable-delivery.work-pulling.producer-controller`. + * `pekko.reliable-delivery.work-pulling.producer-controller`. 
*/ def create(config: Config): Settings = apply(config) diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/ExtensionsImpl.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/ExtensionsImpl.scala index 6eb8463ce4..d69e98e234 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/ExtensionsImpl.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/ExtensionsImpl.scala @@ -63,8 +63,8 @@ private[pekko] trait ExtensionsImpl extends Extensions { self: ActorSystem[_] wi } } - loadExtensions("akka.actor.typed.library-extensions", throwOnLoadFail = true) - loadExtensions("akka.actor.typed.extensions", throwOnLoadFail = false) + loadExtensions("pekko.actor.typed.library-extensions", throwOnLoadFail = true) + loadExtensions("pekko.actor.typed.extensions", throwOnLoadFail = false) } final override def hasExtension(ext: ExtensionId[_ <: Extension]): Boolean = findExtension(ext) != null diff --git a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/adapter/ActorSystemAdapter.scala b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/adapter/ActorSystemAdapter.scala index fd42d56dda..f412721ca7 100644 --- a/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/adapter/ActorSystemAdapter.scala +++ b/akka-actor-typed/src/main/scala/org/apache/pekko/actor/typed/internal/adapter/ActorSystemAdapter.scala @@ -146,7 +146,7 @@ private[pekko] object ActorSystemAdapter { /** * A classic extension to load configured typed extensions. It is loaded via - * akka.library-extensions. `loadExtensions` cannot be called from the AdapterExtension + * pekko.library-extensions. `loadExtensions` cannot be called from the AdapterExtension * directly because the adapter is created too early during typed actor system creation. * * When on the classpath typed extensions will be loaded for classic ActorSystems as well. 
diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 0fd844cc0f..7b88c7633f 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -1,15 +1,15 @@ #################################### -# Akka Actor Reference Config File # +# Pekko Actor Reference Config File # #################################### # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -# Akka version, checked against the runtime version of Akka. Loaded from generated conf file. +# Pekko version, checked against the runtime version of Pekko. Loaded from generated conf file. include "version" -akka { - # Home directory of Akka, modules in the deploy directory will be loaded +pekko { + # Home directory of Pekko, modules in the deploy directory will be loaded home = "" # Loggers to register at boot time (org.apache.pekko.event.Logging$DefaultLogger logs @@ -26,7 +26,7 @@ akka { logging-filter = "org.apache.pekko.event.DefaultLoggingFilter" # Specifies the default loggers dispatcher - loggers-dispatcher = "akka.actor.default-dispatcher" + loggers-dispatcher = "pekko.actor.default-dispatcher" # Loggers are created and registered synchronously during ActorSystem # start-up, and since they are actors, this timeout is used to bound the @@ -73,11 +73,11 @@ akka { # # Should not be set by end user applications in 'application.conf', use the extensions property for that # - library-extensions = ${?akka.library-extensions} ["org.apache.pekko.serialization.SerializationExtension$"] + library-extensions = ${?pekko.library-extensions} ["org.apache.pekko.serialization.SerializationExtension$"] # List FQCN of extensions which shall be loaded at actor system startup. # Should be on the format: 'extensions = ["foo", "bar"]' etc. 
- # See the Akka Documentation for more info about Extensions + # See the Pekko Documentation for more info about Extensions extensions = [] # Toggles whether threads created by this ActorSystem should be daemons or not @@ -87,9 +87,9 @@ akka { # such as OutOfMemoryError jvm-exit-on-fatal-error = on - # Akka installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will + # Pekko installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`. - # This property is related to `akka.coordinated-shutdown.run-by-jvm-shutdown-hook` below. + # This property is related to `pekko.coordinated-shutdown.run-by-jvm-shutdown-hook` below. # This property makes it possible to disable all such hooks if the application itself # or a higher level framework such as Play prefers to install the JVM shutdown hook and # terminate the ActorSystem itself, with or without using CoordinatedShutdown. @@ -111,7 +111,7 @@ akka { # Either one of "local", "remote" or "cluster" or the # FQCN of the ActorRefProvider to be used; the below is the built-in default, - # note that "remote" and "cluster" requires the akka-remote and akka-cluster + # note that "remote" and "cluster" requires the pekko-remote and pekko-cluster # artifacts to be on the classpath. 
provider = "local" @@ -351,7 +351,7 @@ akka { } "/IO-DNS/inet-address/*" { - dispatcher = "akka.actor.default-blocking-io-dispatcher" + dispatcher = "pekko.actor.default-blocking-io-dispatcher" } "/IO-DNS/async-dns" { @@ -554,7 +554,7 @@ akka { mailbox-requirement = "" } - # Default separate internal dispatcher to run Akka internal tasks and actors on + # Default separate internal dispatcher to run Pekko internal tasks and actors on # protecting them against starvation because of accidental blocking in user actors (which run on the # default dispatcher) internal-dispatcher { @@ -612,25 +612,25 @@ akka { # up a mailbox configuration via T in this mapping requirements { "org.apache.pekko.dispatch.UnboundedMessageQueueSemantics" = - akka.actor.mailbox.unbounded-queue-based + pekko.actor.mailbox.unbounded-queue-based "org.apache.pekko.dispatch.BoundedMessageQueueSemantics" = - akka.actor.mailbox.bounded-queue-based + pekko.actor.mailbox.bounded-queue-based "org.apache.pekko.dispatch.DequeBasedMessageQueueSemantics" = - akka.actor.mailbox.unbounded-deque-based + pekko.actor.mailbox.unbounded-deque-based "org.apache.pekko.dispatch.UnboundedDequeBasedMessageQueueSemantics" = - akka.actor.mailbox.unbounded-deque-based + pekko.actor.mailbox.unbounded-deque-based "org.apache.pekko.dispatch.BoundedDequeBasedMessageQueueSemantics" = - akka.actor.mailbox.bounded-deque-based + pekko.actor.mailbox.bounded-deque-based "org.apache.pekko.dispatch.MultipleConsumerSemantics" = - akka.actor.mailbox.unbounded-queue-based + pekko.actor.mailbox.unbounded-queue-based "org.apache.pekko.dispatch.ControlAwareMessageQueueSemantics" = - akka.actor.mailbox.unbounded-control-aware-queue-based + pekko.actor.mailbox.unbounded-control-aware-queue-based "org.apache.pekko.dispatch.UnboundedControlAwareMessageQueueSemantics" = - akka.actor.mailbox.unbounded-control-aware-queue-based + pekko.actor.mailbox.unbounded-control-aware-queue-based 
"org.apache.pekko.dispatch.BoundedControlAwareMessageQueueSemantics" = - akka.actor.mailbox.bounded-control-aware-queue-based + pekko.actor.mailbox.bounded-control-aware-queue-based "org.apache.pekko.event.LoggerMessageQueueSemantics" = - akka.actor.mailbox.logger-queue + pekko.actor.mailbox.logger-queue } unbounded-queue-based { @@ -685,7 +685,7 @@ akka { debug { # enable function of Actor.loggable(), which is to log any received message - # at DEBUG level, see the “Testing Actor Systems” section of the Akka + # at DEBUG level, see the “Testing Actor Systems” section of the Pekko # Documentation at https://akka.io/docs receive = off @@ -715,7 +715,7 @@ akka { # - using DisabledJavaSerializer instead of JavaSerializer # # Completely disable the use of `org.apache.pekko.serialization.JavaSerialization` by the - # Akka Serialization extension, instead DisabledJavaSerializer will + # Pekko Serialization extension, instead DisabledJavaSerializer will # be inserted which will fail explicitly if attempts to use java serialization are made. # # The log messages emitted by such serializer SHOULD be treated as potential @@ -778,7 +778,7 @@ akka { # `org.apache.pekko.actor.serialization-identifiers."FQCN" = ID` # where `FQCN` is fully qualified class name of the serializer implementation # and `ID` is globally unique serializer identifier number. - # Identifier values from 0 to 40 are reserved for Akka internal usage. + # Identifier values from 0 to 40 are reserved for Pekko internal usage. serialization-identifiers { "org.apache.pekko.serialization.JavaSerializer" = 1 "org.apache.pekko.serialization.ByteArraySerializer" = 4 @@ -806,13 +806,13 @@ akka { # It can be exact class name or name of super class or interfaces (one level). # This is useful when a class is not used for serialization any more and therefore removed # from `serialization-bindings`, but should still be possible to deserialize. 
- allowed-classes = ${akka.serialization.protobuf.whitelist-class} + allowed-classes = ${pekko.serialization.protobuf.whitelist-class} } # Used to set the behavior of the scheduler. # Changing the default values may change the system behavior drastically so make - # sure you know what you're doing! See the Scheduler section of the Akka + # sure you know what you're doing! See the Scheduler section of the Pekko # Documentation for more details. scheduler { # The LightArrayRevolverScheduler is used as the default scheduler in the @@ -910,19 +910,19 @@ akka { # Fully qualified config path which holds the dispatcher configuration # to be used for running the select() calls in the selectors - selector-dispatcher = "akka.io.pinned-dispatcher" + selector-dispatcher = "pekko.io.pinned-dispatcher" # Fully qualified config path which holds the dispatcher configuration # for the read/write worker actors - worker-dispatcher = "akka.actor.internal-dispatcher" + worker-dispatcher = "pekko.actor.internal-dispatcher" # Fully qualified config path which holds the dispatcher configuration # for the selector management actors - management-dispatcher = "akka.actor.internal-dispatcher" + management-dispatcher = "pekko.actor.internal-dispatcher" # Fully qualified config path which holds the dispatcher configuration # on which file IO tasks are scheduled - file-io-dispatcher = "akka.actor.default-blocking-io-dispatcher" + file-io-dispatcher = "pekko.actor.default-blocking-io-dispatcher" # The maximum number of bytes (or "unlimited") to transfer in one batch # when using `WriteFile` command which uses `FileChannel.transferTo` to @@ -992,15 +992,15 @@ akka { # Fully qualified config path which holds the dispatcher configuration # to be used for running the select() calls in the selectors - selector-dispatcher = "akka.io.pinned-dispatcher" + selector-dispatcher = "pekko.io.pinned-dispatcher" # Fully qualified config path which holds the dispatcher configuration # for the read/write worker 
actors - worker-dispatcher = "akka.actor.internal-dispatcher" + worker-dispatcher = "pekko.actor.internal-dispatcher" # Fully qualified config path which holds the dispatcher configuration # for the selector management actors - management-dispatcher = "akka.actor.internal-dispatcher" + management-dispatcher = "pekko.actor.internal-dispatcher" } udp-connected { @@ -1048,31 +1048,31 @@ akka { # Fully qualified config path which holds the dispatcher configuration # to be used for running the select() calls in the selectors - selector-dispatcher = "akka.io.pinned-dispatcher" + selector-dispatcher = "pekko.io.pinned-dispatcher" # Fully qualified config path which holds the dispatcher configuration # for the read/write worker actors - worker-dispatcher = "akka.actor.internal-dispatcher" + worker-dispatcher = "pekko.actor.internal-dispatcher" # Fully qualified config path which holds the dispatcher configuration # for the selector management actors - management-dispatcher = "akka.actor.internal-dispatcher" + management-dispatcher = "pekko.actor.internal-dispatcher" } dns { # Fully qualified config path which holds the dispatcher configuration # for the manager and resolver router actors. - # For actual router configuration see akka.actor.deployment./IO-DNS/* - dispatcher = "akka.actor.internal-dispatcher" + # For actual router configuration see pekko.actor.deployment./IO-DNS/* + dispatcher = "pekko.actor.internal-dispatcher" - # Name of the subconfig at path akka.io.dns, see inet-address below + # Name of the subconfig at path pekko.io.dns, see inet-address below # # Change to `async-dns` to use the new "native" DNS resolver, # which is also capable of resolving SRV records. resolver = "inet-address" # To-be-deprecated DNS resolver implementation which uses the Java InetAddress to resolve DNS records. 
- # To be replaced by `akka.io.dns.async` which implements the DNS protocol natively and without blocking (which InetAddress does) + # To be replaced by `pekko.io.dns.async` which implements the DNS protocol natively and without blocking (which InetAddress does) inet-address { # Must implement org.apache.pekko.io.DnsProvider provider-object = "org.apache.pekko.io.InetAddressDnsProvider" @@ -1175,7 +1175,7 @@ akka { # Run the coordinated shutdown when the JVM process exits, e.g. # via kill SIGTERM signal (SIGINT ctrl-c doesn't work). - # This property is related to `akka.jvm-shutdown-hooks` above. + # This property is related to `pekko.jvm-shutdown-hooks` above. run-by-jvm-shutdown-hook = on # Run the coordinated shutdown when ActorSystem.terminate is called. @@ -1201,7 +1201,7 @@ akka { #//#coordinated-shutdown-phases # CoordinatedShutdown is enabled by default and will run the tasks that - # are added to these phases by individual Akka modules and user logic. + # are added to these phases by individual Pekko modules and user logic. # # The phases are ordered as a DAG by defining the dependencies between the phases # to make sure shutdown tasks are run in the right order. @@ -1312,7 +1312,7 @@ akka { # identify or look up the circuit breaker. # Note: Circuit breakers created without ids are not affected by this configuration. # A child configuration section with the same name as the circuit breaker identifier - # will be used, with fallback to the `akka.circuit-breaker.default` section. + # will be used, with fallback to the `pekko.circuit-breaker.default` section. 
circuit-breaker { # Default configuration that is used if a configuration section diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/AbstractActor.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/AbstractActor.scala index 0b9400d551..8c0411445c 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/AbstractActor.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/AbstractActor.scala @@ -387,7 +387,7 @@ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging * implements the `RequiresMessageQueue<DequeBasedMessageQueueSemantics>` marker interface. * You can override the default mailbox provided when `DequeBasedMessageQueueSemantics` are requested via config: *
- *   akka.actor.mailbox.requirements {
+ *   pekko.actor.mailbox.requirements {
  *     "org.apache.pekko.dispatch.BoundedDequeBasedMessageQueueSemantics" = your-custom-mailbox
  *   }
  * 
diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/ActorSystem.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/ActorSystem.scala index 7c903754a2..8ee86950ee 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/ActorSystem.scala @@ -143,8 +143,8 @@ object ProviderSelection { * @param config Configuration to use for the actor system. If no Config is given, the default reference config will be obtained from the ClassLoader. * @param defaultExecutionContext If defined the ExecutionContext will be used as the default executor inside this ActorSystem. * If no ExecutionContext is given, the system will fallback to the executor configured under - * "akka.actor.default-dispatcher.default-executor.fallback". - * @param actorRefProvider Overrides the `akka.actor.provider` setting in config, can be `local` (default), `remote` or + * "pekko.actor.default-dispatcher.default-executor.fallback". + * @param actorRefProvider Overrides the `pekko.actor.provider` setting in config, can be `local` (default), `remote` or * `cluster`. It can also be a fully qualified class name of a provider. */ final class BootstrapSetup private ( @@ -228,7 +228,7 @@ object ActorSystem { * If no ClassLoader is given, it obtains the current ClassLoader by first inspecting the current * threads' getContextClassLoader, then tries to walk the stack to find the callers class loader, then * falls back to the ClassLoader associated with the ActorSystem class. If no ExecutionContext is given, the - * system will fallback to the executor configured under "akka.actor.default-dispatcher.default-executor.fallback". + * system will fallback to the executor configured under "pekko.actor.default-dispatcher.default-executor.fallback". 
* Note that the given ExecutionContext will be used by all dispatchers that have been configured with * executor = "default-executor", including those that have not defined the executor setting and thereby fallback * to the default of "default-dispatcher.executor". @@ -304,7 +304,7 @@ object ActorSystem { * threads' getContextClassLoader, then tries to walk the stack to find the callers class loader, then * falls back to the ClassLoader associated with the ActorSystem class. * If an ExecutionContext is given, it will be used as the default executor inside this ActorSystem. - * If no ExecutionContext is given, the system will fallback to the executor configured under "akka.actor.default-dispatcher.default-executor.fallback". + * If no ExecutionContext is given, the system will fallback to the executor configured under "pekko.actor.default-dispatcher.default-executor.fallback". * The system will use the passed in config, or falls back to the default reference configuration using the ClassLoader. * * @see The Typesafe Config Library API Documentation @@ -325,15 +325,15 @@ object ActorSystem { * INTERNAL API * * When using Akka Typed the Slf4jLogger should be used by default. - * Looking for config property `akka.use-slf4j` (defined in akka-actor-typed) and + * Looking for config property `pekko.use-slf4j` (defined in akka-actor-typed) and * that `Slf4jLogger` (akka-slf4j) is in classpath. * Then adds `Slf4jLogger` to configured loggers and removes `DefaultLogger`. 
*/ @InternalApi private[pekko] def amendSlf4jConfig(config: Config, dynamicAccess: DynamicAccess): Config = { val slf4jLoggerClassName = "org.apache.pekko.event.slf4j.Slf4jLogger" val slf4jLoggingFilterClassName = "org.apache.pekko.event.slf4j.Slf4jLoggingFilter" - val loggersConfKey = "akka.loggers" - val loggingFilterConfKey = "akka.logging-filter" + val loggersConfKey = "pekko.loggers" + val loggingFilterConfKey = "pekko.logging-filter" val configuredLoggers = immutableSeq(config.getStringList(loggersConfKey)) val configuredLoggingFilter = config.getString(loggingFilterConfKey) @@ -350,7 +350,7 @@ object ActorSystem { else ConfigFactory.parseString(newLoggingFilterConfStr).withFallback(config) } else { - val confKey = "akka.use-slf4j" + val confKey = "pekko.use-slf4j" if (config.hasPath(confKey) && config.getBoolean(confKey) && dynamicAccess.classIsOnClasspath( slf4jLoggerClassName)) { val newLoggers = slf4jLoggerClassName +: configuredLoggers.filterNot(_ == classOf[DefaultLogger].getName) @@ -394,13 +394,13 @@ object ActorSystem { import pekko.util.Helpers.ConfigOps - final val ConfigVersion: String = getString("akka.version") + final val ConfigVersion: String = getString("pekko.version") private final val providerSelectionSetup = setup .get[BootstrapSetup] .flatMap(_.actorRefProvider) .map(_.identifier) - .getOrElse(getString("akka.actor.provider")) + .getOrElse(getString("pekko.actor.provider")) final val ProviderSelectionType: ProviderSelection = ProviderSelection(providerSelectionSetup) @@ -408,70 +408,70 @@ object ActorSystem { final val HasCluster: Boolean = ProviderSelectionType.hasCluster - final val SupervisorStrategyClass: String = getString("akka.actor.guardian-supervisor-strategy") - final val CreationTimeout: Timeout = Timeout(config.getMillisDuration("akka.actor.creation-timeout")) - final val UnstartedPushTimeout: Timeout = Timeout(config.getMillisDuration("akka.actor.unstarted-push-timeout")) + final val SupervisorStrategyClass: String = 
getString("pekko.actor.guardian-supervisor-strategy") + final val CreationTimeout: Timeout = Timeout(config.getMillisDuration("pekko.actor.creation-timeout")) + final val UnstartedPushTimeout: Timeout = Timeout(config.getMillisDuration("pekko.actor.unstarted-push-timeout")) - final val AllowJavaSerialization: Boolean = getBoolean("akka.actor.allow-java-serialization") + final val AllowJavaSerialization: Boolean = getBoolean("pekko.actor.allow-java-serialization") @deprecated("Always enabled from Akka 2.6.0", "2.6.0") final val EnableAdditionalSerializationBindings: Boolean = true - final val SerializeAllMessages: Boolean = getBoolean("akka.actor.serialize-messages") - final val SerializeAllCreators: Boolean = getBoolean("akka.actor.serialize-creators") + final val SerializeAllMessages: Boolean = getBoolean("pekko.actor.serialize-messages") + final val SerializeAllCreators: Boolean = getBoolean("pekko.actor.serialize-creators") final val NoSerializationVerificationNeededClassPrefix: Set[String] = { import pekko.util.ccompat.JavaConverters._ - getStringList("akka.actor.no-serialization-verification-needed-class-prefix").asScala.toSet + getStringList("pekko.actor.no-serialization-verification-needed-class-prefix").asScala.toSet } - final val LogLevel: String = getString("akka.loglevel") - final val StdoutLogLevel: String = getString("akka.stdout-loglevel") - final val Loggers: immutable.Seq[String] = immutableSeq(getStringList("akka.loggers")) - final val LoggersDispatcher: String = getString("akka.loggers-dispatcher") - final val LoggingFilter: String = getString("akka.logging-filter") - final val LoggerStartTimeout: Timeout = Timeout(config.getMillisDuration("akka.logger-startup-timeout")) - final val LogConfigOnStart: Boolean = config.getBoolean("akka.log-config-on-start") - final val LogDeadLetters: Int = toRootLowerCase(config.getString("akka.log-dead-letters")) match { + final val LogLevel: String = getString("pekko.loglevel") + final val StdoutLogLevel: String 
= getString("pekko.stdout-loglevel") + final val Loggers: immutable.Seq[String] = immutableSeq(getStringList("pekko.loggers")) + final val LoggersDispatcher: String = getString("pekko.loggers-dispatcher") + final val LoggingFilter: String = getString("pekko.logging-filter") + final val LoggerStartTimeout: Timeout = Timeout(config.getMillisDuration("pekko.logger-startup-timeout")) + final val LogConfigOnStart: Boolean = config.getBoolean("pekko.log-config-on-start") + final val LogDeadLetters: Int = toRootLowerCase(config.getString("pekko.log-dead-letters")) match { case "off" | "false" => 0 case "on" | "true" => Int.MaxValue - case _ => config.getInt("akka.log-dead-letters") + case _ => config.getInt("pekko.log-dead-letters") } - final val LogDeadLettersDuringShutdown: Boolean = config.getBoolean("akka.log-dead-letters-during-shutdown") + final val LogDeadLettersDuringShutdown: Boolean = config.getBoolean("pekko.log-dead-letters-during-shutdown") final val LogDeadLettersSuspendDuration: Duration = { - val key = "akka.log-dead-letters-suspend-duration" + val key = "pekko.log-dead-letters-suspend-duration" toRootLowerCase(config.getString(key)) match { case "infinite" => Duration.Inf case _ => config.getMillisDuration(key) } } - final val AddLoggingReceive: Boolean = getBoolean("akka.actor.debug.receive") - final val DebugAutoReceive: Boolean = getBoolean("akka.actor.debug.autoreceive") - final val DebugLifecycle: Boolean = getBoolean("akka.actor.debug.lifecycle") - final val FsmDebugEvent: Boolean = getBoolean("akka.actor.debug.fsm") - final val DebugEventStream: Boolean = getBoolean("akka.actor.debug.event-stream") - final val DebugUnhandledMessage: Boolean = getBoolean("akka.actor.debug.unhandled") - final val DebugRouterMisconfiguration: Boolean = getBoolean("akka.actor.debug.router-misconfiguration") + final val AddLoggingReceive: Boolean = getBoolean("pekko.actor.debug.receive") + final val DebugAutoReceive: Boolean = getBoolean("pekko.actor.debug.autoreceive") 
+ final val DebugLifecycle: Boolean = getBoolean("pekko.actor.debug.lifecycle") + final val FsmDebugEvent: Boolean = getBoolean("pekko.actor.debug.fsm") + final val DebugEventStream: Boolean = getBoolean("pekko.actor.debug.event-stream") + final val DebugUnhandledMessage: Boolean = getBoolean("pekko.actor.debug.unhandled") + final val DebugRouterMisconfiguration: Boolean = getBoolean("pekko.actor.debug.router-misconfiguration") - final val Home: Option[String] = config.getString("akka.home") match { + final val Home: Option[String] = config.getString("pekko.home") match { case "" => None case x => Some(x) } - final val SchedulerClass: String = getString("akka.scheduler.implementation") - final val Daemonicity: Boolean = getBoolean("akka.daemonic") - final val JvmExitOnFatalError: Boolean = getBoolean("akka.jvm-exit-on-fatal-error") - final val JvmShutdownHooks: Boolean = getBoolean("akka.jvm-shutdown-hooks") - final val FailMixedVersions: Boolean = getBoolean("akka.fail-mixed-versions") + final val SchedulerClass: String = getString("pekko.scheduler.implementation") + final val Daemonicity: Boolean = getBoolean("pekko.daemonic") + final val JvmExitOnFatalError: Boolean = getBoolean("pekko.jvm-exit-on-fatal-error") + final val JvmShutdownHooks: Boolean = getBoolean("pekko.jvm-shutdown-hooks") + final val FailMixedVersions: Boolean = getBoolean("pekko.fail-mixed-versions") final val CoordinatedShutdownTerminateActorSystem: Boolean = getBoolean( - "akka.coordinated-shutdown.terminate-actor-system") + "pekko.coordinated-shutdown.terminate-actor-system") final val CoordinatedShutdownRunByActorSystemTerminate: Boolean = getBoolean( - "akka.coordinated-shutdown.run-by-actor-system-terminate") + "pekko.coordinated-shutdown.run-by-actor-system-terminate") if (CoordinatedShutdownRunByActorSystemTerminate && !CoordinatedShutdownTerminateActorSystem) throw new ConfigurationException( - "akka.coordinated-shutdown.run-by-actor-system-terminate=on and " + - 
"akka.coordinated-shutdown.terminate-actor-system=off is not a supported configuration combination.") + "pekko.coordinated-shutdown.run-by-actor-system-terminate=on and " + + "pekko.coordinated-shutdown.terminate-actor-system=off is not a supported configuration combination.") - final val DefaultVirtualNodesFactor: Int = getInt("akka.actor.deployment.default.virtual-nodes-factor") + final val DefaultVirtualNodesFactor: Int = getInt("pekko.actor.deployment.default.virtual-nodes-factor") if (ConfigVersion != Version) throw new pekko.ConfigurationException( @@ -657,7 +657,7 @@ abstract class ActorSystem extends ActorRefFactory with ClassicActorSystemProvid * Terminates this actor system by running [[CoordinatedShutdown]] with reason * [[CoordinatedShutdown.ActorSystemTerminateReason]]. * - * If `akka.coordinated-shutdown.run-by-actor-system-terminate` is configured to `off` + * If `pekko.coordinated-shutdown.run-by-actor-system-terminate` is configured to `off` * it will not run `CoordinatedShutdown`, but the `ActorSystem` and its actors * will still be terminated. 
* @@ -843,7 +843,7 @@ private[pekko] class ActorSystemImpl( """.stripMargin.replaceAll("[\r\n]", "")) if (settings.JvmExitOnFatalError) - try logFatalError("shutting down JVM since 'akka.jvm-exit-on-fatal-error' is enabled for", cause, thread) + try logFatalError("shutting down JVM since 'pekko.jvm-exit-on-fatal-error' is enabled for", cause, thread) finally System.exit(-1) else try logFatalError("shutting down", cause, thread) @@ -1221,8 +1221,8 @@ private[pekko] class ActorSystemImpl( } } - loadExtensions("akka.library-extensions", throwOnLoadFail = true) - loadExtensions("akka.extensions", throwOnLoadFail = false) + loadExtensions("pekko.library-extensions", throwOnLoadFail = true) + loadExtensions("pekko.extensions", throwOnLoadFail = false) } override def toString: String = lookupRoot.path.root.address.toString diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/CoordinatedShutdown.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/CoordinatedShutdown.scala index 77b0be2280..6d9fe8399e 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/CoordinatedShutdown.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/CoordinatedShutdown.scala @@ -186,7 +186,7 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi override def lookup = CoordinatedShutdown override def createExtension(system: ExtendedActorSystem): CoordinatedShutdown = { - val conf = system.settings.config.getConfig("akka.coordinated-shutdown") + val conf = system.settings.config.getConfig("pekko.coordinated-shutdown") val phases = phasesFromConfig(conf) val coord = new CoordinatedShutdown(system, phases) init(system, conf, coord) diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/Deployer.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/Deployer.scala index 94af4a6327..17d36758d3 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/Deployer.scala +++ 
b/akka-actor/src/main/scala/org/apache/pekko/actor/Deployer.scala @@ -219,11 +219,11 @@ private[pekko] class Deployer(val settings: ActorSystem.Settings, val dynamicAcc private val resizerEnabled: Config = ConfigFactory.parseString("resizer.enabled=on") private val deployments = new AtomicReference(WildcardIndex[Deploy]()) - private val config = settings.config.getConfig("akka.actor.deployment") + private val config = settings.config.getConfig("pekko.actor.deployment") protected val default = config.getConfig("default") val routerTypeMapping: Map[String, String] = settings.config - .getConfig("akka.actor.router.type-mapping") + .getConfig("pekko.actor.router.type-mapping") .root .unwrapped .asScala diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/LightArrayRevolverScheduler.scala index d62e9946ad..a188f7c48b 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/LightArrayRevolverScheduler.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/LightArrayRevolverScheduler.scala @@ -47,16 +47,16 @@ class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFac val WheelSize = config - .getInt("akka.scheduler.ticks-per-wheel") + .getInt("pekko.scheduler.ticks-per-wheel") .requiring(ticks => (ticks & (ticks - 1)) == 0, "ticks-per-wheel must be a power of 2") val TickDuration = config - .getMillisDuration("akka.scheduler.tick-duration") + .getMillisDuration("pekko.scheduler.tick-duration") .requiring( _ >= 10.millis || !Helpers.isWindows, - "minimum supported akka.scheduler.tick-duration on Windows is 10ms") - .requiring(_ >= 1.millis, "minimum supported akka.scheduler.tick-duration is 1ms") - val ShutdownTimeout = config.getMillisDuration("akka.scheduler.shutdown-timeout") + "minimum supported pekko.scheduler.tick-duration on Windows is 10ms") + .requiring(_ >= 1.millis, "minimum supported pekko.scheduler.tick-duration is 1ms") + val 
ShutdownTimeout = config.getMillisDuration("pekko.scheduler.shutdown-timeout") import LightArrayRevolverScheduler._ diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/Stash.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/Stash.scala index ba998d41d6..c41c120abd 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/Stash.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/Stash.scala @@ -45,7 +45,7 @@ import pekko.dispatch.{ * trait extends `RequiresMessageQueue[DequeBasedMessageQueueSemantics]`. * You can override the default mailbox provided when `DequeBasedMessageQueueSemantics` are requested via config: *
- *    akka.actor.mailbox.requirements {
+ *    pekko.actor.mailbox.requirements {
  *      "org.apache.pekko.dispatch.BoundedDequeBasedMessageQueueSemantics" = your-custom-mailbox
  *    }
  *  
diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/TypedActor.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/TypedActor.scala index 2441839df6..7c2418ac3e 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/TypedActor.scala @@ -702,7 +702,7 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac /** * Default timeout for typed actor methods with non-void return type */ - final val DefaultReturnTimeout = Timeout(settings.config.getMillisDuration("akka.actor.typed.timeout")) + final val DefaultReturnTimeout = Timeout(settings.config.getMillisDuration("pekko.actor.typed.timeout")) /** * Retrieves the underlying ActorRef for the supplied TypedActor proxy, or null if none found diff --git a/akka-actor/src/main/scala/org/apache/pekko/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/org/apache/pekko/actor/dungeon/Dispatch.scala index bafdb65fa5..84e52c2ebc 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/actor/dungeon/Dispatch.scala @@ -28,7 +28,7 @@ import pekko.util.Unsafe final case class SerializationCheckFailedException private[dungeon] (msg: Object, cause: Throwable) extends AkkaException( s"Failed to serialize and deserialize message of type ${msg.getClass.getName} for testing. 
" + - "To avoid this error, either disable 'akka.actor.serialize-messages', mark the message with 'org.apache.pekko.actor.NoSerializationVerificationNeeded', or configure serialization to support this message", + "To avoid this error, either disable 'pekko.actor.serialize-messages', mark the message with 'org.apache.pekko.actor.NoSerializationVerificationNeeded', or configure serialization to support this message", cause) /** diff --git a/akka-actor/src/main/scala/org/apache/pekko/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/org/apache/pekko/dispatch/AbstractDispatcher.scala index 70d03f8dd7..932a443be8 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/dispatch/AbstractDispatcher.scala @@ -257,7 +257,7 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator /** * When the dispatcher no longer has any actors registered, how long will it wait until it shuts itself down, - * defaulting to your akka configs "akka.actor.default-dispatcher.shutdown-timeout" or default specified in + * defaulting to your pekko configs "pekko.actor.default-dispatcher.shutdown-timeout" or default specified in * reference.conf * * INTERNAL API @@ -450,7 +450,7 @@ class DefaultExecutorServiceConfigurator( Debug( "DefaultExecutorServiceConfigurator", this.getClass, - s"Using passed in ExecutionContext as default executor for this ActorSystem. If you want to use a different executor, please specify one in akka.actor.default-dispatcher.default-executor.")) + s"Using passed in ExecutionContext as default executor for this ActorSystem. 
If you want to use a different executor, please specify one in pekko.actor.default-dispatcher.default-executor.")) new AbstractExecutorService with ExecutorServiceFactory with ExecutorServiceFactoryProvider { def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = this diff --git a/akka-actor/src/main/scala/org/apache/pekko/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/org/apache/pekko/dispatch/Dispatchers.scala index a86d28f9ac..e0bab333ef 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/dispatch/Dispatchers.scala @@ -52,20 +52,20 @@ object Dispatchers { * The id of the default dispatcher, also the full key of the * configuration of the default dispatcher. */ - final val DefaultDispatcherId = "akka.actor.default-dispatcher" + final val DefaultDispatcherId = "pekko.actor.default-dispatcher" /** * The id of a default dispatcher to use for operations known to be blocking. Note that * for optimal performance you will want to isolate different blocking resources * on different thread pools. */ - final val DefaultBlockingDispatcherId: String = "akka.actor.default-blocking-io-dispatcher" + final val DefaultBlockingDispatcherId: String = "pekko.actor.default-blocking-io-dispatcher" /** * INTERNAL API */ @InternalApi - private[pekko] final val InternalDispatcherId = "akka.actor.internal-dispatcher" + private[pekko] final val InternalDispatcherId = "pekko.actor.internal-dispatcher" private val MaxDispatcherAliasDepth = 20 @@ -95,7 +95,7 @@ object Dispatchers { * A dispatcher config can also be an alias, in that case it is a config string value pointing * to the actual dispatcher config. * - * Look in `akka.actor.default-dispatcher` section of the reference.conf + * Look in `pekko.actor.default-dispatcher` section of the reference.conf * for documentation of dispatcher options. 
* * Not for user instantiation or extension diff --git a/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailbox.scala b/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailbox.scala index 8a5a6e54d6..a08c6b6837 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailbox.scala @@ -1035,7 +1035,7 @@ object BoundedControlAwareMailbox { /** * Trait to signal that an Actor requires a certain type of message queue semantics. * - * The mailbox type will be looked up by mapping the type T via akka.actor.mailbox.requirements in the config, + * The mailbox type will be looked up by mapping the type T via pekko.actor.mailbox.requirements in the config, * to a mailbox configuration. If no mailbox is assigned on Props or in deployment config then this one will be used. * * The queue type of the created mailbox will be checked against the type T and actor creation will fail if it doesn't diff --git a/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailboxes.scala index dc7d9987e1..5bcdfb256f 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailboxes.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/dispatch/Mailboxes.scala @@ -29,7 +29,7 @@ import pekko.event.Logging.Warning import pekko.util.Reflect object Mailboxes { - final val DefaultMailboxId = "akka.actor.default-mailbox" + final val DefaultMailboxId = "pekko.actor.default-mailbox" final val NoMailboxRequirement = "" final val BoundedCapacityPrefix = "bounded-capacity:" } @@ -67,7 +67,7 @@ private[pekko] class Mailboxes( private val mailboxBindings: Map[Class[_ <: Any], String] = { import pekko.util.ccompat.JavaConverters._ settings.config - .getConfig("akka.actor.mailbox.requirements") + .getConfig("pekko.actor.mailbox.requirements") .root .unwrapped .asScala @@ -82,7 +82,7 @@ private[pekko] class Mailboxes( .recover { case e => throw 
new ConfigurationException( - s"Type [$k] specified as akka.actor.mailbox.requirement " + + s"Type [$k] specified as pekko.actor.mailbox.requirement " + s"[$v] in config can't be loaded due to [${e.getMessage}]", e) } diff --git a/akka-actor/src/main/scala/org/apache/pekko/event/ActorClassificationUnsubscriber.scala b/akka-actor/src/main/scala/org/apache/pekko/event/ActorClassificationUnsubscriber.scala index 0ed5fdf43e..f80a493ccd 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/event/ActorClassificationUnsubscriber.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/event/ActorClassificationUnsubscriber.scala @@ -81,7 +81,7 @@ private[pekko] object ActorClassificationUnsubscriber { busName: String, unsubscribe: ActorRef => Unit, @unused debug: Boolean = false): ActorRef = { - val debug = system.settings.config.getBoolean("akka.actor.debug.event-stream") + val debug = system.settings.config.getBoolean("pekko.actor.debug.event-stream") system .asInstanceOf[ExtendedActorSystem] .systemActorOf( diff --git a/akka-actor/src/main/scala/org/apache/pekko/event/DeadLetterListener.scala b/akka-actor/src/main/scala/org/apache/pekko/event/DeadLetterListener.scala index c766ffad59..5cd4d1a867 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/event/DeadLetterListener.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/event/DeadLetterListener.scala @@ -132,8 +132,8 @@ class DeadLetterListener extends Actor { d.recipient.path.toString, d.recipient.getClass, logMessage + - "This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' " + - "and 'akka.log-dead-letters-during-shutdown'.", + "This logging can be turned off or adjusted with configuration settings 'pekko.log-dead-letters' " + + "and 'pekko.log-dead-letters-during-shutdown'.", Logging.emptyMDC, ActorLogMarker.deadLetter(messageStr))) } diff --git a/akka-actor/src/main/scala/org/apache/pekko/event/EventBus.scala 
b/akka-actor/src/main/scala/org/apache/pekko/event/EventBus.scala index c11512309a..3e153a2cff 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/event/EventBus.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/event/EventBus.scala @@ -19,7 +19,7 @@ import pekko.util.Index * Represents the base type for EventBuses * Internally has an Event type, a Classifier type and a Subscriber type * - * For the Java API, see akka.event.japi.* + * For the Java API, see pekko.event.japi.* */ trait EventBus { type Event diff --git a/akka-actor/src/main/scala/org/apache/pekko/event/EventStreamUnsubscriber.scala b/akka-actor/src/main/scala/org/apache/pekko/event/EventStreamUnsubscriber.scala index ca10fc4831..1474879c75 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/event/EventStreamUnsubscriber.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/event/EventStreamUnsubscriber.scala @@ -75,7 +75,7 @@ private[pekko] object EventStreamUnsubscriber { Props(classOf[EventStreamUnsubscriber], eventStream, debug).withDispatcher(Dispatchers.InternalDispatcherId) def start(system: ActorSystem, stream: EventStream) = { - val debug = system.settings.config.getBoolean("akka.actor.debug.event-stream") + val debug = system.settings.config.getBoolean("pekko.actor.debug.event-stream") val unsubscriber = system .asInstanceOf[ExtendedActorSystem] .systemActorOf(props(stream, debug), "eventStreamUnsubscriber-" + unsubscribersCount.incrementAndGet()) diff --git a/akka-actor/src/main/scala/org/apache/pekko/event/Logging.scala b/akka-actor/src/main/scala/org/apache/pekko/event/Logging.scala index 4e7f049e5b..d8a3901a47 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/event/Logging.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/event/Logging.scala @@ -85,7 +85,7 @@ trait LoggingBus extends ActorEventBus { new LoggerException, simpleName(this), this.getClass, - "unknown akka.stdout-loglevel " + config.StdoutLogLevel)) + "unknown pekko.stdout-loglevel " + 
config.StdoutLogLevel)) ErrorLevel } AllLogLevels.filter(level >= _).foreach(l => subscribe(StandardOutLogger, classFor(l))) @@ -111,7 +111,7 @@ trait LoggingBus extends ActorEventBus { val level = levelFor(system.settings.LogLevel).getOrElse { // only log initialization errors directly with StandardOutLogger.print StandardOutLogger.print( - Error(new LoggerException, logName, this.getClass, "unknown akka.loglevel " + system.settings.LogLevel)) + Error(new LoggerException, logName, this.getClass, "unknown pekko.loglevel " + system.settings.LogLevel)) ErrorLevel } try { @@ -420,7 +420,7 @@ object LogSource { * Logging is configured by setting (some of) the following: * *

- * akka {
+ * pekko {
  *   loggers = ["org.apache.pekko.slf4j.Slf4jLogger"] # for example
  *   loglevel = "INFO"        # used when normal logging ("loggers") has been started
  *   stdout-loglevel = "WARN" # used during application start-up until normal logging is available
@@ -1101,7 +1101,7 @@ object Logging {
    * output. This logger is always attached first in order to be able to log
    * failures during application start-up, even before normal logging is
    * started. Its log level can be defined by configuration setting
-   * akka.stdout-loglevel.
+   * pekko.stdout-loglevel.
    */
   class StandardOutLogger extends MinimalActorRef with StdOutLogger {
     val path: ActorPath = RootActorPath(Address("akka", "all-systems"), "/StandardOutLogger")
@@ -1129,7 +1129,7 @@ object Logging {
 
   /**
    * Actor wrapper around the standard output logger. If
-   * akka.loggers is not set, it defaults to just this
+   * pekko.loggers is not set, it defaults to just this
    * logger.
    */
   class DefaultLogger extends Actor with StdOutLogger with RequiresMessageQueue[LoggerMessageQueueSemantics] {
diff --git a/akka-actor/src/main/scala/org/apache/pekko/event/LoggingReceive.scala b/akka-actor/src/main/scala/org/apache/pekko/event/LoggingReceive.scala
index 6b75e2d3ea..704af2c790 100644
--- a/akka-actor/src/main/scala/org/apache/pekko/event/LoggingReceive.scala
+++ b/akka-actor/src/main/scala/org/apache/pekko/event/LoggingReceive.scala
@@ -30,7 +30,7 @@ object LoggingReceive {
    * 
* * This method does NOT modify the given Receive unless - * `akka.actor.debug.receive` is set in configuration. + * `pekko.actor.debug.receive` is set in configuration. */ def apply(r: Receive)(implicit context: ActorContext): Receive = withLabel(null)(r) diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/Dns.scala b/akka-actor/src/main/scala/org/apache/pekko/io/Dns.scala index 70cb842f21..52e3fae2c5 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/Dns.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/Dns.scala @@ -36,7 +36,7 @@ abstract class Dns { /** * Lookup if a DNS resolved is cached. The exact behavior of caching will depend on - * the akka.actor.io.dns.resolver that is configured. + * the pekko.actor.io.dns.resolver that is configured. */ @deprecated("Use cached(DnsProtocol.Resolve)", "2.6.0") def cached(@unused name: String): Option[Dns.Resolved] = None @@ -106,7 +106,7 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { /** * Lookup if a DNS resolved is cached. The exact behavior of caching will depend on - * the akka.actor.io.dns.resolver that is configured. + * the pekko.actor.io.dns.resolver that is configured. */ @deprecated("use cached(DnsProtocol.Resolve)", "2.6.0") def cached(name: String)(system: ActorSystem): Option[Resolved] = { @@ -125,7 +125,7 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { /** * Lookup if a DNS resolved is cached. The exact behavior of caching will depend on - * the akka.actor.io.dns.resolver that is configured. + * the pekko.actor.io.dns.resolver that is configured. 
*/ def cached(name: DnsProtocol.Resolve)(system: ActorSystem): Option[DnsProtocol.Resolved] = { Dns(system).cache.cached(name) @@ -173,7 +173,7 @@ class DnsExt private[pekko] (val system: ExtendedActorSystem, resolverName: Stri new JFunction[String, ActorRef] { override def apply(r: String): ActorRef = { val settings = - new Settings(system.settings.config.getConfig("akka.io.dns"), "async-dns") + new Settings(system.settings.config.getConfig("pekko.io.dns"), "async-dns") val provider = system.dynamicAccess.createInstanceFor[DnsProvider](settings.ProviderObjectName, Nil).get Logging(system, classOf[DnsExt]) .info("Creating async dns resolver {} with manager name {}", settings.Resolver, managerName) @@ -201,7 +201,7 @@ class DnsExt private[pekko] (val system: ExtendedActorSystem, resolverName: Stri */ @InternalApi def this(system: ExtendedActorSystem) = - this(system, system.settings.config.getString("akka.io.dns.resolver"), "IO-DNS") + this(system, system.settings.config.getString("pekko.io.dns.resolver"), "IO-DNS") class Settings private[DnsExt] (config: Config, resolverName: String) { @@ -219,7 +219,7 @@ class DnsExt private[pekko] (val system: ExtendedActorSystem, resolverName: Stri } // Settings for the system resolver - val Settings: Settings = new Settings(system.settings.config.getConfig("akka.io.dns"), resolverName) + val Settings: Settings = new Settings(system.settings.config.getConfig("pekko.io.dns"), resolverName) // System DNS resolver @nowarn("msg=deprecated") diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/InetAddressDnsResolver.scala b/akka-actor/src/main/scala/org/apache/pekko/io/InetAddressDnsResolver.scala index 43c3c2939a..f0bb869b4f 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/InetAddressDnsResolver.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/InetAddressDnsResolver.scala @@ -93,7 +93,7 @@ class InetAddressDnsResolver(cache: SimpleDnsCache, config: Config) extends Acto case _ => { val finiteTtl = config 
.getDuration(path, TimeUnit.SECONDS) - .requiring(_ > 0, s"akka.io.dns.$path must be 'default', 'forever', 'never' or positive duration") + .requiring(_ > 0, s"pekko.io.dns.$path must be 'default', 'forever', 'never' or positive duration") Ttl.fromPositive(finiteTtl.seconds) } } diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/Tcp.scala b/akka-actor/src/main/scala/org/apache/pekko/io/Tcp.scala index 02b9f9d801..1b4f069012 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/Tcp.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/Tcp.scala @@ -596,7 +596,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { class TcpExt(system: ExtendedActorSystem) extends IO.Extension { - val Settings = new Settings(system.settings.config.getConfig("akka.io.tcp")) + val Settings = new Settings(system.settings.config.getConfig("pekko.io.tcp")) class Settings private[TcpExt] (_config: Config) extends SelectionHandlerSettings(_config) { import _config._ diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/TcpManager.scala b/akka-actor/src/main/scala/org/apache/pekko/io/TcpManager.scala index f495954f0b..c012338aba 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/TcpManager.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/TcpManager.scala @@ -24,7 +24,7 @@ import pekko.actor.{ ActorLogging, Props } * listening to server events). To unbind the port an [[pekko.io.Tcp.Unbind]] message must be sent to the Listener actor. * * If the bind request is rejected because the Tcp system is not able to register more channels (see the nr-of-selectors - * and max-channels configuration options in the akka.io.tcp section of the configuration) the sender will be notified + * and max-channels configuration options in the pekko.io.tcp section of the configuration) the sender will be notified * with a [[pekko.io.Tcp.CommandFailed]] message. This message contains the original command for reference. 
* * When an inbound TCP connection is established, the handler will be notified by a [[pekko.io.Tcp.Connected]] message. @@ -42,7 +42,7 @@ import pekko.actor.{ ActorLogging, Props } * to the Connection actor. * * If the connect request is rejected because the Tcp system is not able to register more channels (see the nr-of-selectors - * and max-channels configuration options in the akka.io.tcp section of the configuration) the sender will be notified + * and max-channels configuration options in the pekko.io.tcp section of the configuration) the sender will be notified * with a [[pekko.io.Tcp.CommandFailed]] message. This message contains the original command for reference. */ private[io] class TcpManager(tcp: TcpExt) diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/org/apache/pekko/io/TcpOutgoingConnection.scala index dcc039706f..0dbe0bd7f3 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/TcpOutgoingConnection.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/TcpOutgoingConnection.scala @@ -122,7 +122,7 @@ private[io] class TcpOutgoingConnection( } else { log.debug( "Could not establish connection because finishConnect " + - "never returned true (consider increasing akka.io.tcp.finish-connect-retries)") + "never returned true (consider increasing pekko.io.tcp.finish-connect-retries)") stop(FinishConnectNeverReturnedTrueException) } } diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/Udp.scala b/akka-actor/src/main/scala/org/apache/pekko/io/Udp.scala index c761d74f7e..ce71a6bbca 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/Udp.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/Udp.scala @@ -222,7 +222,7 @@ class UdpExt(system: ExtendedActorSystem) extends IO.Extension { import Udp.UdpSettings - val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp")) + val settings: UdpSettings = new 
UdpSettings(system.settings.config.getConfig("pekko.io.udp")) val manager: ActorRef = { system.systemActorOf( diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/UdpConnected.scala b/akka-actor/src/main/scala/org/apache/pekko/io/UdpConnected.scala index f8be2a4e52..ff28edc632 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/UdpConnected.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/UdpConnected.scala @@ -160,7 +160,7 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide class UdpConnectedExt(system: ExtendedActorSystem) extends IO.Extension { - val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp-connected")) + val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("pekko.io.udp-connected")) val manager: ActorRef = { system.systemActorOf( diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/UdpManager.scala b/akka-actor/src/main/scala/org/apache/pekko/io/UdpManager.scala index 60b73baf27..3cec1f5347 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/UdpManager.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/UdpManager.scala @@ -27,7 +27,7 @@ import pekko.io.Udp._ * listening to server events). To unbind the port an [[pekko.io.Tcp.Unbind]] message must be sent to the Listener actor. * * If the bind request is rejected because the Udp system is not able to register more channels (see the nr-of-selectors - * and max-channels configuration options in the akka.io.udp section of the configuration) the sender will be notified + * and max-channels configuration options in the pekko.io.udp section of the configuration) the sender will be notified * with a [[pekko.io.Udp.CommandFailed]] message. This message contains the original command for reference. 
* * The handler provided in the [[pekko.io.Udp.Bind]] message will receive inbound datagrams to the bound port diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala b/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala index 95ed1ccde0..9107b71f6b 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala @@ -21,7 +21,7 @@ import pekko.util.ccompat.JavaConverters._ /** * Supersedes [[pekko.io.Dns]] protocol. * - * Note that one MUST configure `akka.io.dns.resolver = async-dns` to make use of this protocol and resolver. + * Note that one MUST configure `pekko.io.dns.resolver = async-dns` to make use of this protocol and resolver. * * Allows for more detailed lookups, by specifying which records should be checked, * and responses can more information than plain IP addresses (e.g. ports for SRV records). diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsSettings.scala b/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsSettings.scala index bcb32f07c2..6229629a23 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsSettings.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/dns/DnsSettings.scala @@ -65,7 +65,7 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) { case _ => val finiteTtl = c .getDuration(path) - .requiring(!_.isNegative, s"akka.io.dns.$path must be 'default', 'forever', 'never' or positive duration") + .requiring(!_.isNegative, s"pekko.io.dns.$path must be 'default', 'forever', 'never' or positive duration") Ttl.fromPositive(finiteTtl) } @@ -125,7 +125,7 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) { def failUnableToDetermineDefaultNameservers = throw new IllegalStateException( "Unable to obtain default nameservers from JNDI or via reflection. 
" + - "Please set `akka.io.dns.async-dns.nameservers` explicitly in order to be able to resolve domain names. ") + "Please set `pekko.io.dns.async-dns.nameservers` explicitly in order to be able to resolve domain names. ") } diff --git a/akka-actor/src/main/scala/org/apache/pekko/io/dns/internal/RecordTypeSerializer.scala b/akka-actor/src/main/scala/org/apache/pekko/io/dns/internal/RecordTypeSerializer.scala index ce53351db0..809a9b2ec4 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/io/dns/internal/RecordTypeSerializer.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/io/dns/internal/RecordTypeSerializer.scala @@ -13,7 +13,7 @@ import pekko.util.{ ByteIterator, ByteStringBuilder, OptionVal } */ private[pekko] object RecordTypeSerializer { - // TODO other type than ByteStringBuilder? (was used in akka-dns) + // TODO other type than ByteStringBuilder? (was used in pekko-dns) def write(out: ByteStringBuilder, value: RecordType): Unit = { out.putShort(value.code) } diff --git a/akka-actor/src/main/scala/org/apache/pekko/pattern/CircuitBreakersRegistry.scala b/akka-actor/src/main/scala/org/apache/pekko/pattern/CircuitBreakersRegistry.scala index d012d942e2..aa8ea3641f 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/pattern/CircuitBreakersRegistry.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/pattern/CircuitBreakersRegistry.scala @@ -55,7 +55,7 @@ final class CircuitBreakersRegistry(system: ExtendedActorSystem) extends Extensi private val breakers = new ConcurrentHashMap[String, CircuitBreaker] - private val config = system.settings.config.getConfig("akka.circuit-breaker") + private val config = system.settings.config.getConfig("pekko.circuit-breaker") private val defaultBreakerConfig = config.getConfig("default") private def createCircuitBreaker(id: String): CircuitBreaker = { diff --git a/akka-actor/src/main/scala/org/apache/pekko/pattern/internal/CircuitBreakerTelemetry.scala 
b/akka-actor/src/main/scala/org/apache/pekko/pattern/internal/CircuitBreakerTelemetry.scala index 5569e2befd..0f14ef9683 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/pattern/internal/CircuitBreakerTelemetry.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/pattern/internal/CircuitBreakerTelemetry.scala @@ -18,7 +18,7 @@ import pekko.util.ccompat.JavaConverters._ * and [[ExtendedActorSystem]]. To setup your implementation, add a setting in your `application.conf`: * * {{{ - * akka.circuit-breaker.telemetry.implementations += com.example.MyMetrics + * pekko.circuit-breaker.telemetry.implementations += com.example.MyMetrics * }}} */ @InternalStableApi @@ -80,7 +80,7 @@ trait CircuitBreakerTelemetry { */ @InternalApi private[pekko] object CircuitBreakerTelemetryProvider { def start(breakerId: String, system: ExtendedActorSystem): CircuitBreakerTelemetry = { - val configPath = "akka.circuit-breaker.telemetry.implementations" + val configPath = "pekko.circuit-breaker.telemetry.implementations" if (!system.settings.config.hasPath(configPath)) { CircuitBreakerNoopTelemetry } else { diff --git a/akka-actor/src/main/scala/org/apache/pekko/routing/Balancing.scala b/akka-actor/src/main/scala/org/apache/pekko/routing/Balancing.scala index 6e3bb9cbae..1a29065ff1 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/routing/Balancing.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/routing/Balancing.scala @@ -116,7 +116,7 @@ final case class BalancingPool( if (!dispatchers.hasDispatcher(dispatcherId)) { // dynamically create the config and register the dispatcher configurator for the // dispatcher of this pool - val deployDispatcherConfigPath = s"akka.actor.deployment.$deployPath.pool-dispatcher" + val deployDispatcherConfigPath = s"pekko.actor.deployment.$deployPath.pool-dispatcher" val systemConfig = context.system.settings.config val dispatcherConfig = context.system.dispatchers.config( dispatcherId, diff --git 
a/akka-actor/src/main/scala/org/apache/pekko/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/org/apache/pekko/routing/OptimalSizeExploringResizer.scala index ebf0f450e5..a50b7adcdc 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/routing/OptimalSizeExploringResizer.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/routing/OptimalSizeExploringResizer.scala @@ -114,7 +114,7 @@ case object OptimalSizeExploringResizer { * you allow, i.e. upperBound - lowerBound. * * For documentation about the parameters, see the reference.conf - - * akka.actor.deployment.default.optimal-size-exploring-resizer + * pekko.actor.deployment.default.optimal-size-exploring-resizer */ @SerialVersionUID(1L) case class DefaultOptimalSizeExploringResizer( diff --git a/akka-actor/src/main/scala/org/apache/pekko/routing/RouterConfig.scala b/akka-actor/src/main/scala/org/apache/pekko/routing/RouterConfig.scala index 715eafb058..6b3eaf40fd 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/routing/RouterConfig.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/routing/RouterConfig.scala @@ -210,7 +210,7 @@ trait Pool extends RouterConfig { private[pekko] def enrichWithPoolDispatcher(routeeProps: Props, context: ActorContext): Props = if (usePoolDispatcher && routeeProps.dispatcher == Dispatchers.DefaultDispatcherId) routeeProps.withDispatcher( - "akka.actor.deployment." + context.self.path.elements.drop(1).mkString("/", "/", "") + "pekko.actor.deployment." 
+ context.self.path.elements.drop(1).mkString("/", "/", "") + ".pool-dispatcher") else routeeProps @@ -313,7 +313,7 @@ class FromConfig( throw new UnsupportedOperationException("FromConfig must not create RouterActor") override def verifyConfig(path: ActorPath): Unit = - throw new ConfigurationException(s"Configuration missing for router [$path] in 'akka.actor.deployment' section.") + throw new ConfigurationException(s"Configuration missing for router [$path] in 'pekko.actor.deployment' section.") /** * Setting the supervisor strategy to be used for the “head” Router actor. diff --git a/akka-actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala b/akka-actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala index 462bd355ad..bab80a9310 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala @@ -41,9 +41,9 @@ object Serialization { @InternalApi private[pekko] val currentTransportInformation = new DynamicVariable[Information](null) class Settings(val config: Config) { - val Serializers: Map[String, String] = configToMap(config.getConfig("akka.actor.serializers")) + val Serializers: Map[String, String] = configToMap(config.getConfig("pekko.actor.serializers")) val SerializationBindings: Map[String, String] = { - val bindings = config.getConfig("akka.actor.serialization-bindings") + val bindings = config.getConfig("pekko.actor.serialization-bindings") configToMap(bindings) } @@ -184,7 +184,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { throw new NotSerializableException( s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. 
" + "The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in sync between the two systems.") + "pekko.actor.serializers is not in sync between the two systems.") } withTransportInformation { () => serializer.fromBinary(bytes, clazz).asInstanceOf[T] @@ -204,7 +204,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { case _: NoSuchElementException => throw new NotSerializableException( s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in sync between the two systems.") + "pekko.actor.serializers is not in sync between the two systems.") } deserializeByteArray(bytes, serializer, manifest) } @@ -255,7 +255,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { case _: NoSuchElementException => throw new NotSerializableException( s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in synch between the two systems.") + "pekko.actor.serializers is not in synch between the two systems.") } // not using `withTransportInformation { () =>` because deserializeByteBuffer is supposed to be the @@ -356,7 +356,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { LogMarker.Security, "Using the Java serializer for class [{}] which is not recommended because of " + "performance implications. 
Use another serializer or disable this warning using the setting " + - "'akka.actor.warn-about-java-serializer-usage'", + "'pekko.actor.warn-about-java-serializer-usage'", clazz.getName) } @@ -387,7 +387,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { if (!system.settings.AllowJavaSerialization && serializerFQN == classOf[JavaSerializer].getName) { log.debug( "Replacing JavaSerializer with DisabledJavaSerializer, " + - "due to `akka.actor.allow-java-serialization = off`.") + "due to `pekko.actor.allow-java-serialization = off`.") classOf[DisabledJavaSerializer].getName } else serializerFQN @@ -415,7 +415,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { case det: SerializerDetails if isDisallowedJavaSerializer(det.serializer) => log.debug( "Replacing JavaSerializer with DisabledJavaSerializer, " + - "due to `akka.actor.allow-java-serialization = off`.") + "due to `pekko.actor.allow-java-serialization = off`.") SerializerDetails(det.alias, new DisabledJavaSerializer(system), det.useFor) case det => det } @@ -549,9 +549,9 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { } private val isJavaSerializationWarningEnabled = - settings.config.getBoolean("akka.actor.warn-about-java-serializer-usage") + settings.config.getBoolean("pekko.actor.warn-about-java-serializer-usage") private val isWarningOnNoVerificationEnabled = - settings.config.getBoolean("akka.actor.warn-on-no-serialization-verification") + settings.config.getBoolean("pekko.actor.warn-on-no-serialization-verification") private def isDisallowedJavaSerializer(serializer: Serializer): Boolean = { serializer.isInstanceOf[JavaSerializer] && !system.settings.AllowJavaSerialization diff --git a/akka-actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala b/akka-actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala index b01931ea56..efed687e38 100644 --- 
a/akka-actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala @@ -261,7 +261,7 @@ object BaseSerializer { * where `FQCN` is fully qualified class name of the serializer implementation * and `ID` is globally unique serializer identifier number. */ - final val SerializationIdentifiers = "akka.actor.serialization-identifiers" + final val SerializationIdentifiers = "pekko.actor.serialization-identifiers" /** INTERNAL API */ @InternalApi @@ -331,7 +331,7 @@ object JavaSerializer { class JavaSerializer(val system: ExtendedActorSystem) extends BaseSerializer { if (!system.settings.AllowJavaSerialization) throw new DisabledJavaSerializer.JavaSerializationException( - "Attempted creation of `JavaSerializer` while `akka.actor.allow-java-serialization = off` was set!") + "Attempted creation of `JavaSerializer` while `pekko.actor.allow-java-serialization = off` was set!") def includeManifest: Boolean = false @@ -370,7 +370,7 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser override def toBinary(o: AnyRef, buf: ByteBuffer): Unit = { log.warning( LogMarker.Security, - "Outgoing message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set! " + + "Outgoing message attempted to use Java Serialization even though `pekko.actor.allow-java-serialization = off` was set! 
" + "Message type was: [{}]", o.getClass) throw IllegalSerialization @@ -380,7 +380,7 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { log.warning( LogMarker.Security, - "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") + "Incoming message attempted to use Java Serialization even though `pekko.actor.allow-java-serialization = off` was set!") throw IllegalDeserialization } @@ -389,7 +389,7 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser // we don't capture the manifest or mention it in the log as the default setting for includeManifest is set to false. log.warning( LogMarker.Security, - "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") + "Incoming message attempted to use Java Serialization even though `pekko.actor.allow-java-serialization = off` was set!") throw IllegalDeserialization } @@ -403,9 +403,9 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser object DisabledJavaSerializer { final class JavaSerializationException(msg: String) extends RuntimeException(msg) with NoStackTrace final val IllegalSerialization = new JavaSerializationException( - "Attempted to serialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. Check WARNING logs for more details.") + "Attempted to serialize message using Java serialization while `pekko.actor.allow-java-serialization` was disabled. Check WARNING logs for more details.") final val IllegalDeserialization = new JavaSerializationException( - "Attempted to deserialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. 
Check WARNING logs for more details.") + "Attempted to deserialize message using Java serialization while `pekko.actor.allow-java-serialization` was disabled. Check WARNING logs for more details.") } /** diff --git a/akka-actor/src/main/scala/org/apache/pekko/util/FlightRecorderLoader.scala b/akka-actor/src/main/scala/org/apache/pekko/util/FlightRecorderLoader.scala index 965592e837..0188fdc3c3 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/util/FlightRecorderLoader.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/util/FlightRecorderLoader.scala @@ -17,7 +17,7 @@ import pekko.annotation.InternalApi private[pekko] object FlightRecorderLoader { def load[T: ClassTag](casp: ClassicActorSystemProvider, fqcn: String, fallback: T): T = { val system = casp.classicSystem.asInstanceOf[ExtendedActorSystem] - if (JavaVersion.majorVersion >= 11 && system.settings.config.getBoolean("akka.java-flight-recorder.enabled")) { + if (JavaVersion.majorVersion >= 11 && system.settings.config.getBoolean("pekko.java-flight-recorder.enabled")) { // Dynamic instantiation to not trigger class load on earlier JDKs system.dynamicAccess.createInstanceFor[T](fqcn, Nil) match { case Success(jfr) => diff --git a/akka-actor/src/main/scala/org/apache/pekko/util/ManifestInfo.scala b/akka-actor/src/main/scala/org/apache/pekko/util/ManifestInfo.scala index 9eebc5d155..53fd8f3904 100644 --- a/akka-actor/src/main/scala/org/apache/pekko/util/ManifestInfo.scala +++ b/akka-actor/src/main/scala/org/apache/pekko/util/ManifestInfo.scala @@ -154,7 +154,7 @@ final class ManifestInfo(val system: ExtendedActorSystem) extends Extension { /** * Verify that the version is the same for all given artifacts. * - * If configuration `akka.fail-mixed-versions=on` it will throw an `IllegalStateException` if the + * If configuration `pekko.fail-mixed-versions=on` it will throw an `IllegalStateException` if the * versions are not the same for all given artifacts. 
* * @return `true` if versions are the same diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ActorBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ActorBenchmark.scala index bfa0afe65c..49cbc0ef3f 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ActorBenchmark.scala @@ -58,7 +58,7 @@ class ActorBenchmark { system = ActorSystem( "ActorBenchmark", ConfigFactory.parseString(s""" - akka.actor { + pekko.actor { default-mailbox.mailbox-capacity = 512 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolComparativeBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolComparativeBenchmark.scala index f21e89319e..0a851f7c38 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolComparativeBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolComparativeBenchmark.scala @@ -49,7 +49,7 @@ class AffinityPoolComparativeBenchmark { system = ActorSystem( "AffinityPoolComparativeBenchmark", - ConfigFactory.parseString(s"""| akka { + ConfigFactory.parseString(s"""| pekko { | log-dead-letters = off | actor { | default-fj-dispatcher { diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolIdleCPULevelBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolIdleCPULevelBenchmark.scala index ca2603b017..a2e513acc6 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolIdleCPULevelBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolIdleCPULevelBenchmark.scala @@ -38,7 +38,7 @@ class AffinityPoolIdleCPULevelBenchmark { system = ActorSystem( "AffinityPoolWaitingStrategyBenchmark", - ConfigFactory.parseString(s""" | akka { + ConfigFactory.parseString(s""" | pekko { | log-dead-letters = off | actor { | affinity-dispatcher { diff --git 
a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolRequestResponseBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolRequestResponseBenchmark.scala index 9b850c5bae..43c0585ecd 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolRequestResponseBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/AffinityPoolRequestResponseBenchmark.scala @@ -53,7 +53,7 @@ class AffinityPoolRequestResponseBenchmark { system = ActorSystem( "AffinityPoolComparativeBenchmark", - ConfigFactory.parseString(s"""| akka { + ConfigFactory.parseString(s"""| pekko { | log-dead-letters = off | actor { | default-fj-dispatcher { diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/BenchmarkActors.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/BenchmarkActors.scala index 8e92778fb2..deb216ba14 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/BenchmarkActors.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/BenchmarkActors.scala @@ -95,7 +95,7 @@ object BenchmarkActors { private def startPingPongActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String)( implicit system: ActorSystem): (Vector[(ActorRef, ActorRef)], CountDownLatch) = { - val fullPathToDispatcher = "akka.actor." + dispatcher + val fullPathToDispatcher = "pekko.actor." + dispatcher val latch = new CountDownLatch(numPairs * 2) val actors = List .fill(numPairs) { @@ -119,7 +119,7 @@ object BenchmarkActors { private def startEchoActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String, batchSize: Int)( implicit system: ActorSystem): (Vector[ActorRef], CountDownLatch) = { - val fullPathToDispatcher = "akka.actor." + dispatcher + val fullPathToDispatcher = "pekko.actor." 
+ dispatcher val latch = new CountDownLatch(numPairs) val actors = (1 to numPairs).map { _ => system.actorOf(EchoSender.props(messagesPerPair, latch, batchSize).withDispatcher(fullPathToDispatcher)) diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ForkJoinActorBenchmark.scala index ca5c67ef2b..d337437371 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ForkJoinActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/ForkJoinActorBenchmark.scala @@ -48,7 +48,7 @@ class ForkJoinActorBenchmark { system = ActorSystem( "ForkJoinActorBenchmark", ConfigFactory.parseString(s""" - akka { + pekko { log-dead-letters = off default-mailbox.mailbox-capacity = 512 actor { diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/RequestResponseActors.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/RequestResponseActors.scala index 6b590f64d1..065def2856 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/RequestResponseActors.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/RequestResponseActors.scala @@ -73,7 +73,7 @@ object RequestResponseActors { def startUserQueryActorPairs(numActors: Int, numQueriesPerActor: Int, numUsersInDBPerActor: Int, dispatcher: String)( implicit system: ActorSystem) = { - val fullPathToDispatcher = "akka.actor." + dispatcher + val fullPathToDispatcher = "pekko.actor." 
+ dispatcher val latch = new CountDownLatch(numActors) val actorsPairs = for { i <- (1 to (numActors / 2)).toVector diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/TellOnlyBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/TellOnlyBenchmark.scala index 17ff2014f9..a99f14c58c 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/TellOnlyBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/TellOnlyBenchmark.scala @@ -32,7 +32,7 @@ class TellOnlyBenchmark { def setup(): Unit = { system = ActorSystem( "TellOnlyBenchmark", - ConfigFactory.parseString(s"""| akka { + ConfigFactory.parseString(s"""| pekko { | log-dead-letters = off | actor { | default-dispatcher { diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedActorBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedActorBenchmark.scala index 05104e1e02..8feb3244f2 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedActorBenchmark.scala @@ -58,7 +58,7 @@ class TypedActorBenchmark { TypedBenchmarkActors.echoActorsSupervisor(numMessagesPerActorPair, numActors, dispatcher, batchSize), "TypedActorBenchmark", ConfigFactory.parseString(s""" - akka.actor { + pekko.actor { default-mailbox.mailbox-capacity = 512 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedBenchmarkActors.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedBenchmarkActors.scala index 4a94008e0b..1c71c8bb60 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedBenchmarkActors.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedBenchmarkActors.scala @@ -89,7 +89,7 @@ object TypedBenchmarkActors { Behaviors .setup[Any] { ctx => - val props = Props.empty.withDispatcherFromConfig("akka.actor." 
+ dispatcher) + val props = Props.empty.withDispatcherFromConfig("pekko.actor." + dispatcher) val pairs = (1 to numPairs).map { _ => ctx.spawnAnonymous(echoSender(messagesPerPair, ctx.self.narrow[Done], batchSize, props), props) } @@ -156,7 +156,7 @@ object TypedBenchmarkActors { messagesPerPair: Int, numPairs: Int, dispatcher: String): (Vector[(ActorRef[Message], ActorRef[Message])], CountDownLatch) = { - val fullPathToDispatcher = "akka.actor." + dispatcher + val fullPathToDispatcher = "pekko.actor." + dispatcher val latch = new CountDownLatch(numPairs * 2) val pingPongBehavior = newPingPongBehavior(messagesPerPair, latch) val pingPongProps = Props.empty.withDispatcherFromConfig(fullPathToDispatcher) diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedForkJoinActorBenchmark.scala index 1b572df635..2dd0b575bb 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedForkJoinActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/TypedForkJoinActorBenchmark.scala @@ -62,7 +62,7 @@ class TypedForkJoinActorBenchmark { TypedBenchmarkActors.benchmarkPingPongSupervisor(), "TypedForkJoinActorBenchmark", ConfigFactory.parseString(s""" - akka.actor { + pekko.actor { default-mailbox.mailbox-capacity = 512 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryBenchmark.scala index 16f004829f..5eccc283a4 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/actor/typed/delivery/ReliableDeliveryBenchmark.scala @@ -113,7 +113,7 @@ object WorkPullingProducer { val requestNextAdapter = 
context.messageAdapter[WorkPullingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") + .getInt("pekko.reliable-delivery.consumer-controller.flow-control-window") Behaviors.receiveMessagePartial { case WrappedRequestNext(next) => @@ -219,8 +219,8 @@ class ReliableDeliveryBenchmark { Guardian(), "ReliableDeliveryBenchmark", ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.reliable-delivery { + pekko.loglevel = INFO + pekko.reliable-delivery { consumer-controller.flow-control-window = $window } """)) diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/cluster/ddata/ORSetSerializationBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/cluster/ddata/ORSetSerializationBenchmark.scala index 73898d8fb7..6c23a8d5fc 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/cluster/ddata/ORSetSerializationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/cluster/ddata/ORSetSerializationBenchmark.scala @@ -38,9 +38,9 @@ import pekko.serialization.Serializers class ORSetSerializationBenchmark { private val config = ConfigFactory.parseString(""" - akka.actor.provider=cluster - akka.remote.classic.netty.tcp.port=0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider=cluster + pekko.remote.classic.netty.tcp.port=0 + pekko.remote.artery.canonical.port = 0 """) private val system1 = ActorSystem("ORSetSerializationBenchmark", config) diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/dispatch/CachingConfigBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/dispatch/CachingConfigBenchmark.scala index c74b312263..ad2c4190bd 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/dispatch/CachingConfigBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/dispatch/CachingConfigBenchmark.scala @@ -17,7 +17,7 @@ import 
org.openjdk.jmh.annotations._ @OutputTimeUnit(TimeUnit.MILLISECONDS) class CachingConfigBenchmark { - val deepKey = "akka.actor.deep.settings.something" + val deepKey = "pekko.actor.deep.settings.something" val deepConfigString = s"""$deepKey = something""" val deepConfig = ConfigFactory.parseString(deepConfigString) val deepCaching = new CachingConfig(deepConfig) diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/LevelDbBatchingBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/LevelDbBatchingBenchmark.scala index 04f02c3db8..843d838c5e 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/LevelDbBatchingBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/LevelDbBatchingBenchmark.scala @@ -105,9 +105,9 @@ class LevelDbBatchingBenchmark { private def deleteStorage(sys: ActorSystem): Unit = { val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(sys.settings.config.getString(s))) + "pekko.persistence.journal.leveldb.dir", + "pekko.persistence.journal.leveldb-shared.store.dir", + "pekko.persistence.snapshot-store.local.dir").map(s => new File(sys.settings.config.getString(s))) storageLocations.foreach(FileUtils.deleteDirectory) } diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistenceActorDeferBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistenceActorDeferBenchmark.scala index b99a425cb3..69eb5ef72b 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistenceActorDeferBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistenceActorDeferBenchmark.scala @@ -36,9 +36,9 @@ class PersistentActorDeferBenchmark { lazy val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - 
"akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + "pekko.persistence.journal.leveldb.dir", + "pekko.persistence.journal.leveldb-shared.store.dir", + "pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) var system: ActorSystem = _ diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorBenchmark.scala index 8288c4548f..cf5b6f55a9 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorBenchmark.scala @@ -25,9 +25,9 @@ class PersistentActorThroughputBenchmark { lazy val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + "pekko.persistence.journal.leveldb.dir", + "pekko.persistence.journal.leveldb-shared.store.dir", + "pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) var system: ActorSystem = _ diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala index 7109381fce..42ceb49a79 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala @@ -25,9 +25,9 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark { lazy val storageLocations = List( - 
"akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + "pekko.persistence.journal.leveldb.dir", + "pekko.persistence.journal.leveldb-shared.store.dir", + "pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) var system: ActorSystem = _ diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/remote/artery/CodecBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/remote/artery/CodecBenchmark.scala index 282cc2f241..ccd8994bdb 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/remote/artery/CodecBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/remote/artery/CodecBenchmark.scala @@ -67,7 +67,7 @@ class CodecBenchmark { override def association(remoteAddress: Address): OutboundContext = ??? override def completeHandshake(peer: UniqueAddress): Future[Done] = ??? 
override lazy val settings: ArterySettings = - ArterySettings(ConfigFactory.load().getConfig("akka.remote.artery")) + ArterySettings(ConfigFactory.load().getConfig("pekko.remote.artery")) override def publishDropped(inbound: InboundEnvelope, reason: String): Unit = () } @@ -83,7 +83,7 @@ class CodecBenchmark { @Setup(Level.Trial) def setupTrial(): Unit = { val commonConfig = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = WARNING actor.provider = remote remote.artery.enabled = on @@ -97,7 +97,8 @@ class CodecBenchmark { val config = configType match { case RemoteInstrument => ConfigFactory - .parseString(s"""akka.remote.artery.advanced.instruments = [ "${classOf[DummyRemoteInstrument].getName}" ]""") + .parseString( + s"""pekko.remote.artery.advanced.instruments = [ "${classOf[DummyRemoteInstrument].getName}" ]""") .withFallback(commonConfig) case _ => commonConfig diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializationBench.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializationBench.scala index b02dc2b127..d6eebebe59 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializationBench.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializationBench.scala @@ -198,7 +198,7 @@ class JacksonSerializationBench { @Setup(Level.Trial) def setupTrial(): Unit = { val config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = WARNING actor { serialization-bindings { @@ -212,7 +212,7 @@ class JacksonSerializationBench { } } } - akka.serialization.jackson.jackson-json.compression { + pekko.serialization.jackson.jackson-json.compression { algorithm = $compression compress-larger-than = 100 b } @@ -237,7 +237,7 @@ class JacksonSerializationBench { size = blob.length println( s"# Size is $size of ${msg.getClass.getName} with " + - 
s"${system.settings.config.getString("akka.serialization.jackson.jackson-json.compression.algorithm")}") + s"${system.settings.config.getString("pekko.serialization.jackson.jackson-json.compression.algorithm")}") } serializer.fromBinary(blob, serializer.manifest(msg)).asInstanceOf[T] case serializer => diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/AskBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/AskBenchmark.scala index d6636ded2f..2e3e43fda6 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/AskBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/AskBenchmark.scala @@ -36,7 +36,7 @@ class AskBenchmark { import AskBenchmark._ val config = ConfigFactory.parseString(""" - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlatMapConcatBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlatMapConcatBenchmark.scala index 73bf9b4269..f7e1c9b48d 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlatMapConcatBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlatMapConcatBenchmark.scala @@ -33,7 +33,7 @@ class FlatMapConcatBenchmark { import FlatMapConcatBenchmark._ private val config = ConfigFactory.parseString(""" - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlowMapBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlowMapBenchmark.scala index b7ab450a26..228d44f649 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlowMapBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FlowMapBenchmark.scala @@ -27,7 +27,7 @@ import pekko.stream.scaladsl._ 
class FlowMapBenchmark { val config = ConfigFactory.parseString(""" - akka { + pekko { log-config-on-start = off log-dead-letters-during-shutdown = off loglevel = "WARNING" diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FramingBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FramingBenchmark.scala index a8fdb20d71..b8dfb67e22 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FramingBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FramingBenchmark.scala @@ -30,7 +30,7 @@ import pekko.util.ByteString class FramingBenchmark { val config: Config = ConfigFactory.parseString(""" - akka { + pekko { log-config-on-start = off log-dead-letters-during-shutdown = off stdout-loglevel = "OFF" diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FusedGraphsBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FusedGraphsBenchmark.scala index 7b56df65cd..8989ed6919 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FusedGraphsBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/FusedGraphsBenchmark.scala @@ -101,7 +101,7 @@ class FusedGraphsBenchmark { implicit val system: ActorSystem = ActorSystem( "test", ConfigFactory.parseString(s""" - akka.stream.materializer.sync-processing-limit = ${Int.MaxValue} + pekko.stream.materializer.sync-processing-limit = ${Int.MaxValue} """)) var testElements: Array[MutableElement] = _ diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/MapAsyncBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/MapAsyncBenchmark.scala index c9ddde1350..2300cbca55 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/MapAsyncBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/MapAsyncBenchmark.scala @@ -33,7 +33,7 @@ class MapAsyncBenchmark { import MapAsyncBenchmark._ val config = ConfigFactory.parseString(""" - 
akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/PartitionHubBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/PartitionHubBenchmark.scala index 74fa135d97..9db5fc29f3 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/PartitionHubBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/PartitionHubBenchmark.scala @@ -34,7 +34,7 @@ class PartitionHubBenchmark { import PartitionHubBenchmark._ val config = ConfigFactory.parseString(""" - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 diff --git a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/SourceRefBenchmark.scala b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/SourceRefBenchmark.scala index b844ac1ccd..ed22360b03 100644 --- a/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/SourceRefBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/org/apache/pekko/stream/SourceRefBenchmark.scala @@ -29,7 +29,7 @@ import pekko.stream.scaladsl._ class SourceRefBenchmark { val config = ConfigFactory.parseString(""" - akka { + pekko { log-config-on-start = off log-dead-letters-during-shutdown = off loglevel = "WARNING" diff --git a/akka-cluster-metrics/src/main/resources/reference.conf b/akka-cluster-metrics/src/main/resources/reference.conf index 9f665e3f5c..4a13bbda14 100644 --- a/akka-cluster-metrics/src/main/resources/reference.conf +++ b/akka-cluster-metrics/src/main/resources/reference.conf @@ -1,5 +1,5 @@ ############################################## -# Akka Cluster Metrics Reference Config File # +# Pekko Cluster Metrics Reference Config File # ############################################## # This is the reference config file that contains all the default settings. 
@@ -20,9 +20,9 @@ # Cluster metrics extension. # Provides periodic statistics collection and publication throughout the cluster. -akka.cluster.metrics { +pekko.cluster.metrics { # Full path of dispatcher configuration key. - dispatcher = "akka.actor.default-dispatcher" + dispatcher = "pekko.actor.default-dispatcher" # How long should any actor wait before starting the periodic tasks. periodic-tasks-initial-delay = 1s # Sigar native library extract location. @@ -85,19 +85,19 @@ akka.cluster.metrics { } # Cluster metrics extension serializers and routers. -akka.actor { +pekko.actor { # Protobuf serializer for remote cluster metrics messages. serializers { - akka-cluster-metrics = "org.apache.pekko.cluster.metrics.protobuf.MessageSerializer" + pekko-cluster-metrics = "org.apache.pekko.cluster.metrics.protobuf.MessageSerializer" } # Interface binding for remote cluster metrics messages. serialization-bindings { - "org.apache.pekko.cluster.metrics.ClusterMetricsMessage" = akka-cluster-metrics - "org.apache.pekko.cluster.metrics.AdaptiveLoadBalancingPool" = akka-cluster-metrics - "org.apache.pekko.cluster.metrics.MixMetricsSelector" = akka-cluster-metrics - "org.apache.pekko.cluster.metrics.CpuMetricsSelector$" = akka-cluster-metrics - "org.apache.pekko.cluster.metrics.HeapMetricsSelector$" = akka-cluster-metrics - "org.apache.pekko.cluster.metrics.SystemLoadAverageMetricsSelector$" = akka-cluster-metrics + "org.apache.pekko.cluster.metrics.ClusterMetricsMessage" = pekko-cluster-metrics + "org.apache.pekko.cluster.metrics.AdaptiveLoadBalancingPool" = pekko-cluster-metrics + "org.apache.pekko.cluster.metrics.MixMetricsSelector" = pekko-cluster-metrics + "org.apache.pekko.cluster.metrics.CpuMetricsSelector$" = pekko-cluster-metrics + "org.apache.pekko.cluster.metrics.HeapMetricsSelector$" = pekko-cluster-metrics + "org.apache.pekko.cluster.metrics.SystemLoadAverageMetricsSelector$" = pekko-cluster-metrics } # Globally unique metrics extension serializer identifier. 
serialization-identifiers { diff --git a/akka-cluster-metrics/src/main/scala/org/apache/pekko/cluster/metrics/ClusterMetricsSettings.scala b/akka-cluster-metrics/src/main/scala/org/apache/pekko/cluster/metrics/ClusterMetricsSettings.scala index bcd95713e1..7f6ef1901b 100644 --- a/akka-cluster-metrics/src/main/scala/org/apache/pekko/cluster/metrics/ClusterMetricsSettings.scala +++ b/akka-cluster-metrics/src/main/scala/org/apache/pekko/cluster/metrics/ClusterMetricsSettings.scala @@ -18,7 +18,7 @@ import pekko.util.Helpers.Requiring */ case class ClusterMetricsSettings(config: Config) { - private val cc = config.getConfig("akka.cluster.metrics") + private val cc = config.getConfig("pekko.cluster.metrics") // Extension. val MetricsDispatcher: String = cc.getString("dispatcher") diff --git a/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsExtensionSpec.scala index 4f991f70b6..8630d57327 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -28,26 +28,26 @@ trait ClusterMetricsCommonConfig extends MultiNodeConfig { // Extract individual sigar library for every node. nodeList.foreach { role => nodeConfig(role) { - parseString(s"akka.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) + parseString(s"pekko.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) } } // Enable metrics extension in akka-cluster-metrics. 
def enableMetricsExtension = parseString(""" - akka.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] - akka.cluster.metrics.collector.enabled = on + pekko.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] + pekko.cluster.metrics.collector.enabled = on """) // Disable metrics extension in akka-cluster-metrics. def disableMetricsExtension = parseString(""" - akka.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] - akka.cluster.metrics.collector.enabled = off + pekko.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] + pekko.cluster.metrics.collector.enabled = off """) // Activate slf4j logging along with test listener. def customLogging = parseString(""" - akka.loggers=["org.apache.pekko.testkit.TestEventListener","org.apache.pekko.event.slf4j.Slf4jLogger"] - akka.logger-startup-timeout = 15s + pekko.loggers=["org.apache.pekko.testkit.TestEventListener","org.apache.pekko.event.slf4j.Slf4jLogger"] + pekko.logger-startup-timeout = 15s """) } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsRoutingSpec.scala index e8bc32ab00..ef76040dbe 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -70,7 +70,7 @@ object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig { nodeList.foreach { role => nodeConfig(role) { ConfigFactory.parseString( - s"akka.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) + s"pekko.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) } } @@ -78,12 +78,12 @@ object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig { debugConfig(on = false) 
.withFallback(ConfigFactory.parseString(""" # Enable metrics estension. - akka.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] + pekko.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] - akka.cluster.failure-detector.acceptable-heartbeat-pause = 10s + pekko.cluster.failure-detector.acceptable-heartbeat-pause = 10s # Use rapid metrics collection. - akka.cluster.metrics { + pekko.cluster.metrics { collector { sample-interval = 1s gossip-interval = 1s @@ -92,7 +92,7 @@ object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig { } # Use metrics extension routing. - akka.actor.deployment { + pekko.actor.deployment { /router3 = { router = cluster-metrics-adaptive-pool metrics-selector = cpu diff --git a/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/sample/StatsSampleSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/sample/StatsSampleSpec.scala index 56ea772e88..36f209ba4d 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/sample/StatsSampleSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/org/apache/pekko/cluster/metrics/sample/StatsSampleSpec.scala @@ -29,9 +29,9 @@ object StatsSampleSpecConfig extends MultiNodeConfig { nodeConfig(role) { ConfigFactory.parseString(s""" # Enable metrics extension in akka-cluster-metrics. - akka.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] + pekko.extensions=["org.apache.pekko.cluster.metrics.ClusterMetricsExtension"] # Sigar native library extract location during tests. 
- akka.cluster.metrics.native-library-extract-folder=target/native/${role.name} + pekko.cluster.metrics.native-library-extract-folder=target/native/${role.name} """) } } @@ -39,11 +39,11 @@ object StatsSampleSpecConfig extends MultiNodeConfig { // this configuration will be used for all nodes // note that no fixed host names and ports are used commonConfig(ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.log-remote-lifecycle-events = off - akka.cluster.roles = [compute] + pekko.actor.provider = cluster + pekko.remote.classic.log-remote-lifecycle-events = off + pekko.cluster.roles = [compute] #//#router-lookup-config - akka.actor.deployment { + pekko.actor.deployment { /statsService/workerRouter { router = consistent-hashing-group routees.paths = ["/user/statsWorker"] diff --git a/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/TestUtil.scala index 6dc0808b31..3eb545d268 100644 --- a/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/TestUtil.scala +++ b/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/TestUtil.scala @@ -151,33 +151,33 @@ object MetricsConfig { /** Test w/o cluster, with collection enabled. */ val defaultEnabled = """ - akka.cluster.metrics { + pekko.cluster.metrics { collector { enabled = on sample-interval = 1s gossip-interval = 1s } } - akka.actor.provider = remote - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """ /** Test w/o cluster, with collection disabled. 
*/ val defaultDisabled = """ - akka.cluster.metrics { + pekko.cluster.metrics { collector { enabled = off } } - akka.actor.provider = remote - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """ /** Test in cluster, with manual collection activation, collector mock, fast. */ val clusterSigarMock = """ - akka.cluster.metrics { + pekko.cluster.metrics { periodic-tasks-initial-delay = 100ms collector { enabled = off @@ -187,9 +187,9 @@ object MetricsConfig { fallback = false } } - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """ } diff --git a/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/WeightedRouteesSpec.scala b/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/WeightedRouteesSpec.scala index b8476f06ee..c2900fc081 100644 --- a/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/WeightedRouteesSpec.scala +++ b/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/WeightedRouteesSpec.scala @@ -17,9 +17,9 @@ import pekko.routing.ActorSelectionRoutee import pekko.testkit.AkkaSpec class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """)) { val protocol = diff --git a/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/protobuf/MessageSerializerSpec.scala b/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/protobuf/MessageSerializerSpec.scala index 8f71f40f64..168fc3949a 100644 --- 
a/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/protobuf/MessageSerializerSpec.scala +++ b/akka-cluster-metrics/src/test/scala/org/apache/pekko/cluster/metrics/protobuf/MessageSerializerSpec.scala @@ -12,9 +12,9 @@ import pekko.cluster.metrics._ import pekko.testkit.AkkaSpec class MessageSerializerSpec extends AkkaSpec(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """) { val serializer = new MessageSerializer(system.asInstanceOf[ExtendedActorSystem]) diff --git a/akka-cluster-sharding-typed/src/main/resources/reference.conf b/akka-cluster-sharding-typed/src/main/resources/reference.conf index efa8f72c6f..1fa23e5cab 100644 --- a/akka-cluster-sharding-typed/src/main/resources/reference.conf +++ b/akka-cluster-sharding-typed/src/main/resources/reference.conf @@ -1,7 +1,7 @@ # //#sharding-ext-config # //#number-of-shards -akka.cluster.sharding { +pekko.cluster.sharding { # Number of shards used by the default HashCodeMessageExtractor # when no other message extractor is defined. This value must be # the same for all nodes in the cluster and that is verified by @@ -13,13 +13,13 @@ akka.cluster.sharding { # //#sharding-ext-config -akka.cluster.sharded-daemon-process { - # Settings for the sharded dameon process internal usage of sharding are using the akka.cluste.sharding defaults. +pekko.cluster.sharded-daemon-process { + # Settings for the sharded daemon process internal usage of sharding are using the pekko.cluster.sharding defaults. # Some of the settings can be overriden specifically for the sharded daemon process here. For example can the # `role` setting limit what nodes the daemon processes and the keep alive pingers will run on. 
# Some settings can not be changed (remember-entitites and related settings, passivation, number-of-shards), # overriding those settings will be ignored. - sharding = ${akka.cluster.sharding} + sharding = ${pekko.cluster.sharding} # Each entity is pinged at this interval from each node in the # cluster to trigger a start if it has stopped, for example during @@ -28,11 +28,11 @@ akka.cluster.sharded-daemon-process { keep-alive-interval = 10s } -akka.cluster.configuration-compatibility-check.checkers { - akka-cluster-sharding-hash-extractor = "org.apache.pekko.cluster.sharding.typed.internal.JoinConfigCompatCheckerClusterSharding" +pekko.cluster.configuration-compatibility-check.checkers { + pekko-cluster-sharding-hash-extractor = "org.apache.pekko.cluster.sharding.typed.internal.JoinConfigCompatCheckerClusterSharding" } -akka.actor { +pekko.actor { serializers { typed-sharding = "org.apache.pekko.cluster.sharding.typed.internal.ShardingSerializer" } @@ -44,9 +44,9 @@ akka.actor { } } -akka.reliable-delivery { +pekko.reliable-delivery { sharding { - producer-controller = ${akka.reliable-delivery.producer-controller} + producer-controller = ${pekko.reliable-delivery.producer-controller} producer-controller { # Limit of how many messages that can be buffered when there # is no demand from the consumer side. @@ -65,11 +65,11 @@ akka.reliable-delivery { resend-first-unconfirmed-idle-timeout = 10s # Chunked messages not implemented for sharding yet. Override to not - # propagate property from akka.reliable-delivery.producer-controller. + # propagate property from pekko.reliable-delivery.producer-controller. chunk-large-messages = off } - consumer-controller = ${akka.reliable-delivery.consumer-controller} + consumer-controller = ${pekko.reliable-delivery.consumer-controller} consumer-controller { # Limit of how many messages that can be buffered before the # ShardingConsumerController is initialized by the Start message. 
diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingSettings.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingSettings.scala index 96c5ee15f9..d890d7e8b8 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingSettings.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingSettings.scala @@ -26,7 +26,7 @@ object ClusterShardingSettings { /** Scala API: Creates new cluster sharding settings object */ def apply(system: ActorSystem[_]): ClusterShardingSettings = - fromConfig(system.settings.config.getConfig("akka.cluster.sharding")) + fromConfig(system.settings.config.getConfig("pekko.cluster.sharding")) def fromConfig(config: Config): ClusterShardingSettings = { val classicSettings = ClassicShardingSettings(config) diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSettings.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSettings.scala index 4353f3a3b5..a93b368d33 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSettings.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSettings.scala @@ -19,7 +19,7 @@ object ShardedDaemonProcessSettings { /** Scala API: Create default settings for system */ def apply(system: ActorSystem[_]): ShardedDaemonProcessSettings = { - fromConfig(system.settings.config.getConfig("akka.cluster.sharded-daemon-process")) + fromConfig(system.settings.config.getConfig("pekko.cluster.sharded-daemon-process")) } /** Java API: Create default settings for system */ diff --git 
a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala index b546538cf2..839c6e56c9 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala @@ -43,22 +43,22 @@ object ShardingConsumerController { object Settings { /** - * Scala API: Factory method from config `akka.reliable-delivery.sharding.consumer-controller` + * Scala API: Factory method from config `pekko.reliable-delivery.sharding.consumer-controller` * of the `ActorSystem`. */ def apply(system: ActorSystem[_]): Settings = - apply(system.settings.config.getConfig("akka.reliable-delivery.sharding.consumer-controller")) + apply(system.settings.config.getConfig("pekko.reliable-delivery.sharding.consumer-controller")) /** * Scala API: Factory method from Config corresponding to - * `akka.reliable-delivery.sharding.consumer-controller`. + * `pekko.reliable-delivery.sharding.consumer-controller`. */ def apply(config: Config): Settings = { new Settings(bufferSize = config.getInt("buffer-size"), ConsumerController.Settings(config)) } /** - * Java API: Factory method from config `akka.reliable-delivery.sharding.consumer-controller` + * Java API: Factory method from config `pekko.reliable-delivery.sharding.consumer-controller` * of the `ActorSystem`. */ def create(system: ActorSystem[_]): Settings = @@ -66,7 +66,7 @@ object ShardingConsumerController { /** * Java API: Factory method from Config corresponding to - * `akka.reliable-delivery.sharding.consumer-controller`. + * `pekko.reliable-delivery.sharding.consumer-controller`. 
*/ def create(config: Config): Settings = apply(config) diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala index d2d555b868..5c06c2faee 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala @@ -163,15 +163,15 @@ object ShardingProducerController { object Settings { /** - * Scala API: Factory method from config `akka.reliable-delivery.sharding.producer-controller` + * Scala API: Factory method from config `pekko.reliable-delivery.sharding.producer-controller` * of the `ActorSystem`. */ def apply(system: ActorSystem[_]): Settings = - apply(system.settings.config.getConfig("akka.reliable-delivery.sharding.producer-controller")) + apply(system.settings.config.getConfig("pekko.reliable-delivery.sharding.producer-controller")) /** * Scala API: Factory method from Config corresponding to - * `akka.reliable-delivery.sharding.producer-controller`. + * `pekko.reliable-delivery.sharding.producer-controller`. */ def apply(config: Config): Settings = { new Settings( @@ -183,7 +183,7 @@ object ShardingProducerController { } /** - * Java API: Factory method from config `akka.reliable-delivery.sharding.producer-controller` + * Java API: Factory method from config `pekko.reliable-delivery.sharding.producer-controller` * of the `ActorSystem`. */ def create(system: ActorSystem[_]): Settings = @@ -191,7 +191,7 @@ object ShardingProducerController { /** * Java API: Factory method from Config corresponding to - * `akka.reliable-delivery.sharding.producer-controller`. + * `pekko.reliable-delivery.sharding.producer-controller`. 
*/ def create(config: Config): Settings = apply(config) diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala index 8c6ca26c2e..9a6535322e 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala @@ -19,7 +19,7 @@ import pekko.cluster.{ ConfigValidation, JoinConfigCompatChecker, Valid } private[pekko] final class JoinConfigCompatCheckerClusterSharding extends JoinConfigCompatChecker { override def requiredKeys: im.Seq[String] = - im.Seq("akka.cluster.sharding.number-of-shards") + im.Seq("pekko.cluster.sharding.number-of-shards") override def check(toCheck: Config, actualConfig: Config): ConfigValidation = { if (toCheck.hasPath(requiredKeys.head)) diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala index 73716951f8..4b21c29b1b 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala @@ -133,9 +133,9 @@ private[pekko] final class ShardedDaemonProcessImpl(system: ActorSystem[_]) val shardingBaseSettings = settings.shardingSettings match { case None => - // defaults in akka.cluster.sharding but allow overrides specifically for sharded-daemon-process + // defaults in pekko.cluster.sharding but allow 
overrides specifically for sharded-daemon-process ClusterShardingSettings.fromConfig( - system.settings.config.getConfig("akka.cluster.sharded-daemon-process.sharding")) + system.settings.config.getConfig("pekko.cluster.sharded-daemon-process.sharding")) case Some(shardingSettings) => shardingSettings } diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala index 2738357791..5786dc3ecd 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala @@ -78,7 +78,7 @@ object ClusterSharding { * Messages can also be sent via the [[EntityRef]] retrieved with [[ClusterSharding#entityRefFor]], * which will also send via the local `ShardRegion`. * - * Some settings can be configured as described in the `akka.cluster.sharding` + * Some settings can be configured as described in the `pekko.cluster.sharding` * section of the `reference.conf`. * * The `ShardRegion` actor is started on each node in the cluster, or group of nodes @@ -285,7 +285,7 @@ final class Entity[M, E] private ( * them in [[ShardingEnvelope]] with the entityId of the recipient actor. That envelope * is used by the [[HashCodeMessageExtractor]] for extracting entityId and shardId. The number of * shards is then defined by `numberOfShards` in `ClusterShardingSettings`, which by default - * is configured with `akka.cluster.sharding.number-of-shards`. + * is configured with `pekko.cluster.sharding.number-of-shards`. 
*/ def withMessageExtractor[Envelope](newExtractor: ShardingMessageExtractor[Envelope, M]): Entity[M, Envelope] = new Entity( diff --git a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala index a2c2726542..6a54c61855 100644 --- a/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala @@ -78,7 +78,7 @@ object ClusterSharding extends ExtensionId[ClusterSharding] { * Messages can also be sent via the [[EntityRef]] retrieved with [[ClusterSharding#entityRefFor]], * which will also send via the local `ShardRegion`. * - * Some settings can be configured as described in the `akka.cluster.sharding` + * Some settings can be configured as described in the `pekko.cluster.sharding` * section of the `reference.conf`. * * The `ShardRegion` actor is started on each node in the cluster, or group of nodes @@ -280,7 +280,7 @@ final class Entity[M, E] private[pekko] ( * them in [[ShardingEnvelope]] with the entityId of the recipient actor. That envelope * is used by the [[HashCodeMessageExtractor]] for extracting entityId and shardId. The number of * shards is then defined by `numberOfShards` in `ClusterShardingSettings`, which by default - * is configured with `akka.cluster.sharding.number-of-shards`. + * is configured with `pekko.cluster.sharding.number-of-shards`. 
*/ def withMessageExtractor[Envelope](newExtractor: ShardingMessageExtractor[Envelope, M]): Entity[M, Envelope] = new Entity( diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala index 2cb63bb198..7a4b265d1a 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala @@ -63,13 +63,13 @@ object ClusterShardingRememberEntitiesPerfSpecConfig extends MultiNodeClusterShardingConfig( rememberEntities = true, additionalConfig = s""" - akka.loglevel = DEBUG - akka.testconductor.barrier-timeout = 3 minutes - akka.remote.artery.advanced.outbound-message-queue-size = 10000 - akka.remote.artery.advanced.maximum-frame-size = 512 KiB + pekko.loglevel = DEBUG + pekko.testconductor.barrier-timeout = 3 minutes + pekko.remote.artery.advanced.outbound-message-queue-size = 10000 + pekko.remote.artery.advanced.maximum-frame-size = 512 KiB # comment next line to enable durable lmdb storage - akka.cluster.sharding.distributed-data.durable.keys = [] - akka.cluster.sharding { + pekko.cluster.sharding.distributed-data.durable.keys = [] + pekko.cluster.sharding { remember-entities = on } """) { @@ -79,7 +79,7 @@ object ClusterShardingRememberEntitiesPerfSpecConfig val third = role("third") nodeConfig(third)(ConfigFactory.parseString(s""" - akka.cluster.sharding.distributed-data.durable.lmdb { + pekko.cluster.sharding.distributed-data.durable.lmdb { # use same directory when starting new node on third (not used at same time) dir = "$targetDir/sharding-third" } diff --git 
a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala index 311fca4c77..9bf96089c6 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala @@ -32,12 +32,12 @@ object ClusterShardingPreparingForShutdownSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = off - akka.cluster.leader-actions-interval = 100ms + pekko.loglevel = DEBUG + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = off + pekko.cluster.leader-actions-interval = 100ms """)) object Pinger { diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingStatsSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingStatsSpec.scala index 2a19f5b86f..8898883d44 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingStatsSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ClusterShardingStatsSpec.scala @@ -29,9 +29,9 @@ object ClusterShardingStatsSpecConfig extends MultiNodeConfig { val third 
= role("third") commonConfig(ConfigFactory.parseString(""" - akka.log-dead-letters-during-shutdown = off - akka.cluster.sharding.updating-state-timeout = 2s - akka.cluster.sharding.waiting-for-state-timeout = 2s + pekko.log-dead-letters-during-shutdown = off + pekko.cluster.sharding.updating-state-timeout = 2s + pekko.cluster.sharding.waiting-for-state-timeout = 2s """).withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/MultiDcClusterShardingSpec.scala index 84612ae458..16eb31d16a 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/MultiDcClusterShardingSpec.scala @@ -25,8 +25,8 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { val fourth = role("fourth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.cluster.sharding { + pekko.loglevel = DEBUG + pekko.cluster.sharding { number-of-shards = 10 # First is likely to be ignored as shard coordinator not ready retry-interval = 0.2s @@ -34,11 +34,11 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc1" + pekko.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(third, fourth)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc2" + pekko.cluster.multi-data-center.self-data-center = "dc2" """)) testTransport(on = true) diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala 
b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala index 5c1aac20ef..4490ee073d 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala @@ -37,26 +37,26 @@ object ReplicatedShardingSpec extends MultiNodeConfig { val second = role("second") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.loglevel = DEBUG + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" // for the proxy plugin - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString(""" - akka.persistence.journal.plugin = "akka.persistence.journal.proxy" - akka.persistence.journal.proxy { + pekko.persistence.journal.plugin = "pekko.persistence.journal.proxy" + pekko.persistence.journal.proxy { start-target-journal = on - target-journal-plugin = "akka.persistence.journal.inmem" + target-journal-plugin = "pekko.persistence.journal.inmem" } """)) nodeConfig(second)(ConfigFactory.parseString(""" - akka.persistence.journal.plugin = "akka.persistence.journal.proxy" - akka.persistence.journal.proxy { + pekko.persistence.journal.plugin = "pekko.persistence.journal.proxy" + pekko.persistence.journal.proxy { start-target-journal = off - target-journal-plugin = "akka.persistence.journal.inmem" + target-journal-plugin = "pekko.persistence.journal.inmem" } """)) diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSpec.scala 
b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSpec.scala index eb4a7f6f6e..fa8b7b770e 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/ShardedDaemonProcessSpec.scala @@ -49,8 +49,8 @@ object ShardedDaemonProcessSpec extends MultiNodeConfig { } commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.cluster.sharded-daemon-process { + pekko.loglevel = DEBUG + pekko.cluster.sharded-daemon-process { sharding { # First is likely to be ignored as shard coordinator not ready retry-interval = 0.2s diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala index 57d10dd5e6..f301ae5bb8 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala @@ -46,12 +46,12 @@ object DeliveryThroughputSpec extends MultiNodeConfig { val cfg = ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (30) org.apache.pekko.test.DeliveryThroughputSpec.totalMessagesFactor = 10.0 - akka.reliable-delivery { + pekko.reliable-delivery { consumer-controller.flow-control-window = 50 sharding.consumer-controller.flow-control-window = 50 sharding.producer-controller.cleanup-unused-after = 5s } - akka { + pekko { loglevel = INFO log-dead-letters = off testconductor.barrier-timeout = ${barrierTimeout.toSeconds}s @@ -66,7 +66,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { 
nodeConfig(second, third) { ConfigFactory.parseString(""" - akka.cluster.roles = ["worker"] + pekko.cluster.roles = ["worker"] """) } @@ -199,7 +199,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { context.messageAdapter[WorkPullingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var startTime = System.nanoTime() var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") + .getInt("pekko.reliable-delivery.consumer-controller.flow-control-window") Behaviors.receiveMessage { case WrappedRequestNext(next) => @@ -246,7 +246,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { context.messageAdapter[ShardingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var startTime = System.nanoTime() var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window") + .getInt("pekko.reliable-delivery.sharding.consumer-controller.flow-control-window") var latestDemand: ShardingProducerController.RequestNext[Consumer.Command] = null var messagesSentToEachEntity: Map[String, Long] = Map.empty[String, Long].withDefaultValue(0L) @@ -323,7 +323,7 @@ abstract class DeliveryThroughputSpec private val settingsToReport = List( "org.apache.pekko.test.DeliveryThroughputSpec.totalMessagesFactor", - "akka.reliable-delivery.consumer-controller.flow-control-window") + "pekko.reliable-delivery.consumer-controller.flow-control-window") private val resultReporter = BenchmarkFileReporter("DeliveryThroughputSpec", system, settingsToReport) def testPointToPoint(testSettings: TestSettings): Unit = { diff --git a/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/AccountExampleTest.java b/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/AccountExampleTest.java index d12206e25c..0af191464a 100644 --- 
a/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/AccountExampleTest.java +++ b/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/AccountExampleTest.java @@ -37,12 +37,12 @@ public class AccountExampleTest extends JUnitSuite { public static final Config config = ConfigFactory.parseString( - "akka.actor.provider = cluster \n" - + "akka.remote.classic.netty.tcp.port = 0 \n" - + "akka.remote.artery.canonical.port = 0 \n" - + "akka.remote.artery.canonical.hostname = 127.0.0.1 \n" - + "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n"); + "pekko.actor.provider = cluster \n" + + "pekko.remote.classic.netty.tcp.port = 0 \n" + + "pekko.remote.artery.canonical.port = 0 \n" + + "pekko.remote.artery.canonical.hostname = 127.0.0.1 \n" + + "pekko.persistence.journal.plugin = \"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n"); @ClassRule public static final TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleTest.java b/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleTest.java index 6c2755ff3e..d5c03eeb79 100644 --- a/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleTest.java +++ b/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleTest.java @@ -27,12 +27,12 @@ public class HelloWorldEventSourcedEntityExampleTest extends JUnitSuite { public static final Config config = ConfigFactory.parseString( - "akka.actor.provider = cluster \n" - + "akka.remote.classic.netty.tcp.port = 0 \n" - + 
"akka.remote.artery.canonical.port = 0 \n" - + "akka.remote.artery.canonical.hostname = 127.0.0.1 \n" - + "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n"); + "pekko.actor.provider = cluster \n" + + "pekko.remote.classic.netty.tcp.port = 0 \n" + + "pekko.remote.artery.canonical.port = 0 \n" + + "pekko.remote.artery.canonical.hostname = 127.0.0.1 \n" + + "pekko.persistence.journal.plugin = \"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n"); @ClassRule public static final TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingTest.java b/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingTest.java index a9647a4b6d..1557d39f0c 100644 --- a/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingTest.java +++ b/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingTest.java @@ -211,13 +211,13 @@ public class ReplicatedShardingTest extends JUnitSuite { public static final TestKitJunitResource testKit = new TestKitJunitResource( ConfigFactory.parseString( - " akka.loglevel = DEBUG\n" - + " akka.loggers = [\"org.apache.pekko.testkit.SilenceAllTestEventListener\"]\n" - + " akka.actor.provider = \"cluster\"\n" + " pekko.loglevel = DEBUG\n" + + " pekko.loggers = [\"org.apache.pekko.testkit.SilenceAllTestEventListener\"]\n" + + " pekko.actor.provider = \"cluster\"\n" + " # pretend we're a node in all dc:s\n" - + " akka.cluster.roles = [\"DC-A\", \"DC-B\", \"DC-C\"]\n" - + " akka.remote.classic.netty.tcp.port = 0\n" - + " akka.remote.artery.canonical.port = 0") + + " pekko.cluster.roles = [\"DC-A\", \"DC-B\", \"DC-C\"]\n" + + " pekko.remote.classic.netty.tcp.port = 0\n" + + " 
pekko.remote.artery.canonical.port = 0") .withFallback(PersistenceTestKitPlugin.getInstance().config())); @Rule public final LogCapturing logCapturing = new LogCapturing(); diff --git a/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterShardingPersistenceTest.java b/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterShardingPersistenceTest.java index 9157259d1e..db84ab9151 100644 --- a/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterShardingPersistenceTest.java +++ b/akka-cluster-sharding-typed/src/test/java/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterShardingPersistenceTest.java @@ -29,12 +29,12 @@ public class ClusterShardingPersistenceTest extends JUnitSuite { public static final Config config = ConfigFactory.parseString( - "akka.actor.provider = cluster \n" - + "akka.remote.classic.netty.tcp.port = 0 \n" - + "akka.remote.artery.canonical.port = 0 \n" - + "akka.remote.artery.canonical.hostname = 127.0.0.1 \n" - + "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n"); + "pekko.actor.provider = cluster \n" + + "pekko.remote.classic.netty.tcp.port = 0 \n" + + "pekko.remote.artery.canonical.port = 0 \n" + + "pekko.remote.artery.canonical.hostname = 127.0.0.1 \n" + + "pekko.persistence.journal.plugin = \"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n"); @ClassRule public static final TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/AccountExampleSpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/AccountExampleSpec.scala index 125b970fb4..28a30b3843 100644 --- 
a/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/AccountExampleSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/AccountExampleSpec.scala @@ -22,14 +22,14 @@ import org.scalatest.wordspec.AnyWordSpecLike object AccountExampleSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster + pekko.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on """) } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala index bb6fabb895..89c5c535bf 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/org/apache/pekko/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala @@ -17,14 +17,14 @@ import org.scalatest.wordspec.AnyWordSpecLike object HelloWorldEventSourcedEntityExampleSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster + pekko.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.remote.classic.netty.tcp.port = 0 + 
pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on """) } diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala index 884cb432a5..88ec57a406 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala @@ -22,16 +22,16 @@ object JoinConfigCompatCheckerClusterShardingSpec { val Shards = 2 - val Key = "akka.cluster.sharding.number-of-shards" + val Key = "pekko.cluster.sharding.number-of-shards" val baseConfig: Config = ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.cluster.sharding.state-store-mode = "persistence" - akka.cluster.configuration-compatibility-check.enforce-on-join = on - akka.cluster.jmx.enabled = off - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.cluster.sharding.state-store-mode = "persistence" + pekko.cluster.configuration-compatibility-check.enforce-on-join = on + pekko.cluster.jmx.enabled = off + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """) def clusterConfig: Config = diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala 
b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala index 08283b4108..93a52b9606 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedShardingSpec.scala @@ -39,26 +39,26 @@ import org.scalatest.time.Span @ccompatUsedUntil213 object ReplicatedShardingSpec { def commonConfig = ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0""").withFallback(PersistenceTestKitPlugin.config) + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0""").withFallback(PersistenceTestKitPlugin.config) def roleAConfig = ConfigFactory.parseString(""" - akka.cluster.roles = ["DC-A"] + pekko.cluster.roles = ["DC-A"] """.stripMargin).withFallback(commonConfig) def roleBConfig = ConfigFactory.parseString(""" - akka.cluster.roles = ["DC-B"] + pekko.cluster.roles = ["DC-B"] """.stripMargin).withFallback(commonConfig) def dcAConfig = ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "DC-A" + pekko.cluster.multi-data-center.self-data-center = "DC-A" """).withFallback(commonConfig) def dcBConfig = ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "DC-B" + pekko.cluster.multi-data-center.self-data-center = "DC-B" """).withFallback(commonConfig) sealed trait ReplicationType diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/DurableShardingSpec.scala 
b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/DurableShardingSpec.scala index 80eb2c3b97..7574158ea6 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/DurableShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/DurableShardingSpec.scala @@ -36,13 +36,13 @@ import pekko.persistence.typed.delivery.EventSourcedProducerQueue object DurableShardingSpec { def conf: Config = ConfigFactory.parseString(s""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/DurableShardingSpec-${UUID.randomUUID().toString}" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/DurableShardingSpec-${UUID.randomUUID().toString}" + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) } diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/ReliableDeliveryShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/ReliableDeliveryShardingSpec.scala index 6f5c1a164d..b8a71614f9 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/ReliableDeliveryShardingSpec.scala +++ 
b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/delivery/ReliableDeliveryShardingSpec.scala @@ -33,10 +33,10 @@ import pekko.cluster.typed.Join object ReliableDeliveryShardingSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) object TestShardingProducer { diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala index cce46375ff..5f46622aad 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala @@ -40,17 +40,17 @@ import pekko.persistence.typed.scaladsl.EventSourcedBehavior object ClusterShardingPersistenceSpec { val config = ConfigFactory.parseString(""" - akka.loglevel = INFO - #akka.persistence.typed.log-stashing = on + pekko.loglevel = INFO + #pekko.persistence.typed.log-stashing = on - akka.actor.provider = cluster + pekko.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - 
akka.persistence.journal.inmem.test-serialization = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on """) sealed trait Command diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala index d19c1c7d57..7743f907fd 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala @@ -35,18 +35,18 @@ import pekko.util.ccompat._ @ccompatUsedUntil213 object ClusterShardingSpec { val config = ConfigFactory.parseString(s""" - akka.actor.provider = cluster + pekko.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on - akka.cluster.sharding.number-of-shards = 10 + pekko.cluster.sharding.number-of-shards = 10 - akka.coordinated-shutdown.terminate-actor-system = off - akka.coordinated-shutdown.run-by-actor-system-terminate = off + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.run-by-actor-system-terminate = off """) sealed trait TestProtocol extends CborSerializable diff --git a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala index 
816d585fa8..cb93b831cd 100644 --- a/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala @@ -23,19 +23,19 @@ import pekko.cluster.typed.Join object ShardedDaemonProcessSpec { // single node cluster config def config = ConfigFactory.parseString(""" - akka.actor.provider = cluster + pekko.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on # ping often/start fast for test - akka.cluster.sharded-daemon-process.keep-alive-interval = 1s + pekko.cluster.sharded-daemon-process.keep-alive-interval = 1s - akka.coordinated-shutdown.terminate-actor-system = off - akka.coordinated-shutdown.run-by-actor-system-terminate = off + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.run-by-actor-system-terminate = off """) object MyActor { diff --git a/akka-cluster-sharding/src/main/resources/reference.conf b/akka-cluster-sharding/src/main/resources/reference.conf index dc68fd0369..7baac56445 100644 --- a/akka-cluster-sharding/src/main/resources/reference.conf +++ b/akka-cluster-sharding/src/main/resources/reference.conf @@ -1,5 +1,5 @@ ############################################### -# Akka Cluster Sharding Reference Config File # +# Pekko Cluster Sharding Reference Config File # ############################################### # This is the reference config file that contains all the default settings. 
@@ -8,7 +8,7 @@ # //#sharding-ext-config # Settings for the ClusterShardingExtension -akka.cluster.sharding { +pekko.cluster.sharding { # The extension creates a top level actor with this name in top level system scope, # e.g. '/system/sharding' @@ -277,13 +277,13 @@ akka.cluster.sharding { # Settings for LeastShardAllocationStrategy. # - # A new rebalance algorithm was included in Akka 2.6.10. It can reach optimal balance in + # A new rebalance algorithm was included in Akka 2.6.10. It can reach optimal balance in # less rebalance rounds (typically 1 or 2 rounds). The amount of shards to rebalance in each # round can still be limited to make it progress slower. For backwards compatibility, # the new algorithm is not enabled by default. Enable the new algorithm by setting # `rebalance-absolute-limit` > 0, for example: - # akka.cluster.sharding.least-shard-allocation-strategy.rebalance-absolute-limit=20 - # The new algorithm is recommended and will become the default in future versions of Akka. + # pekko.cluster.sharding.least-shard-allocation-strategy.rebalance-absolute-limit=20 + # The new algorithm is recommended and will become the default in future versions of Pekko. least-shard-allocation-strategy { # Maximum number of shards that will be rebalanced in one rebalance round. # The lower of this and `rebalance-relative-limit` will be used. @@ -356,17 +356,17 @@ max-updates-per-write = 100 } - # Settings for the coordinator singleton. Same layout as akka.cluster.singleton. + # Settings for the coordinator singleton. Same layout as pekko.cluster.singleton. # The "role" of the singleton configuration is not used. The singleton role will - # be the same as "akka.cluster.sharding.role" if - # "akka.cluster.sharding.coordinator-singleton-role-override" is enabled. Disabling it will allow to + # be the same as "pekko.cluster.sharding.role" if + # "pekko.cluster.sharding.coordinator-singleton-role-override" is enabled. 
Disabling it will allow to # use separate nodes for the shard coordinator and the shards themselves. # A lease can be configured in these settings for the coordinator singleton - coordinator-singleton = ${akka.cluster.singleton} + coordinator-singleton = ${pekko.cluster.singleton} # Copies the role for the coordinator singleton from the shards role instead of using the one provided in the - # "akka.cluster.sharding.coordinator-singleton.role" + # "pekko.cluster.sharding.coordinator-singleton.role" coordinator-singleton-role-override = on coordinator-state { @@ -387,13 +387,13 @@ akka.cluster.sharding { } # Settings for the Distributed Data replicator. - # Same layout as akka.cluster.distributed-data. + # Same layout as pekko.cluster.distributed-data. # The "role" of the distributed-data configuration is not used. The distributed-data - # role will be the same as "akka.cluster.sharding.role". + # role will be the same as "pekko.cluster.sharding.role". # Note that there is one Replicator per role and it's not possible # to have different distributed-data settings for different sharding entity types. # Only used when state-store-mode=ddata - distributed-data = ${akka.cluster.distributed-data} + distributed-data = ${pekko.cluster.distributed-data} distributed-data { # minCap parameter to MajorityWrite and MajorityRead consistency level. majority-min-cap = 5 @@ -412,7 +412,7 @@ akka.cluster.sharding { # If specified, you need to define the settings of the actual dispatcher. # This dispatcher for the entity actors is defined by the user provided # Props, i.e. this dispatcher is not used for the entity actors. 
- use-dispatcher = "akka.actor.internal-dispatcher" + use-dispatcher = "pekko.actor.internal-dispatcher" # Config path of the lease that each shard must acquire before starting entity actors # default is no lease @@ -427,11 +427,11 @@ verbose-debug-logging = off # Throw an exception if the internal state machine in the Shard actor does an invalid state transition. - # Mostly for the Akka test suite. If off, the invalid transition is logged as a warning instead of throwing and + # Mostly for the Pekko test suite. If off, the invalid transition is logged as a warning instead of throwing and # crashing the shard. fail-on-invalid-entity-state-transition = off - # Healthcheck that can be used with Akka management health checks: https://doc.akka.io/docs/akka-management/current/healthchecks.html + # Healthcheck that can be used with Pekko management health checks: https://pekko.apache.org/docs/pekko-management/current/healthchecks.html healthcheck { # sharding names to check have registered with the coordinator for the health check to pass # once initial registration has taken place the health check always returns true to prevent the coordinator @@ -446,26 +446,26 @@ } # //#sharding-ext-config -# Enable health check by default for when Akka management is on the classpath -akka.management.health-checks.readiness-checks { +# Enable health check by default for when Pekko management is on the classpath +pekko.management.health-checks.readiness-checks { sharding = "org.apache.pekko.cluster.sharding.ClusterShardingHealthCheck" } -akka.cluster { +pekko.cluster { configuration-compatibility-check { checkers { - akka-cluster-sharding = "org.apache.pekko.cluster.sharding.JoinConfigCompatCheckSharding" + pekko-cluster-sharding = "org.apache.pekko.cluster.sharding.JoinConfigCompatCheckSharding" } } } # Protobuf serializer for Cluster Sharding messages -akka.actor { +pekko.actor { serializers { - akka-sharding = 
"org.apache.pekko.cluster.sharding.protobuf.ClusterShardingMessageSerializer" + pekko-sharding = "org.apache.pekko.cluster.sharding.protobuf.ClusterShardingMessageSerializer" } serialization-bindings { - "org.apache.pekko.cluster.sharding.ClusterShardingSerializable" = akka-sharding + "org.apache.pekko.cluster.sharding.ClusterShardingSerializable" = pekko-sharding } serialization-identifiers { "org.apache.pekko.cluster.sharding.protobuf.ClusterShardingMessageSerializer" = 13 diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterSharding.scala index 1c746dee52..a37049d934 100755 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterSharding.scala @@ -74,7 +74,7 @@ import pekko.util.ccompat.JavaConverters._ * 1. At system startup on each cluster node by registering the supported entity types with * the [[ClusterSharding#start]] method * 1. Retrieve the `ShardRegion` actor for a named entity type with [[ClusterSharding#shardRegion]] - * Settings can be configured as described in the `akka.cluster.sharding` section of the `reference.conf`. + * Settings can be configured as described in the `pekko.cluster.sharding` section of the `reference.conf`. * * '''Shard and ShardCoordinator''': * A shard is a group of entities that will be managed together. 
For the first message in a @@ -182,8 +182,8 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { private lazy val guardian: ActorRef = { val guardianName: String = - system.settings.config.getString("akka.cluster.sharding.guardian-name") - val dispatcher = system.settings.config.getString("akka.cluster.sharding.use-dispatcher") + system.settings.config.getString("pekko.cluster.sharding.guardian-name") + val dispatcher = system.settings.config.getString("pekko.cluster.sharding.use-dispatcher") system.systemActorOf(Props[ClusterShardingGuardian]().withDispatcher(dispatcher), guardianName) } @@ -195,7 +195,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the roles of * the current cluster node and the role specified in [[ClusterShardingSettings]] passed to this method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -239,7 +239,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the roles of * the current cluster node and the role specified in [[ClusterShardingSettings]] passed to this method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -329,7 +329,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the * node roles and the role specified in the [[ClusterShardingSettings]] passed to this method. 
* - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -365,7 +365,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the * node roles and the role specified in the [[ClusterShardingSettings]] passed to this method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -394,7 +394,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the * node roles and the role specified in the [[ClusterShardingSettings]] passed to this method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -440,7 +440,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the * node roles and the role specified in the [[ClusterShardingSettings]] passed to this method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. 
* * @param typeName the name of the entity type @@ -472,7 +472,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * This method will start a [[ShardRegion]] in proxy mode when there is no match between the * node roles and the role specified in the [[ClusterShardingSettings]] passed to this method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -491,7 +491,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity actors itself. The [[ShardRegion]] actor for this type can later be retrieved with the * [[#shardRegion]] method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -517,7 +517,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity actors itself. The [[ShardRegion]] actor for this type can later be retrieved with the * [[#shardRegion]] method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -566,7 +566,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity actors itself. The [[ShardRegion]] actor for this type can later be retrieved with the * [[#shardRegion]] method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. 
* * @param typeName the name of the entity type @@ -585,7 +585,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity actors itself. The [[ShardRegion]] actor for this type can later be retrieved with the * [[#shardRegion]] method. * - * Some settings can be configured as described in the `akka.cluster.sharding` section + * Some settings can be configured as described in the `pekko.cluster.sharding` section * of the `reference.conf`. * * @param typeName the name of the entity type @@ -706,7 +706,7 @@ private[pekko] class ClusterShardingGuardian extends Actor { val cluster = Cluster(context.system) val sharding = ClusterSharding(context.system) - val majorityMinCap = context.system.settings.config.getInt("akka.cluster.sharding.distributed-data.majority-min-cap") + val majorityMinCap = context.system.settings.config.getInt("pekko.cluster.sharding.distributed-data.majority-min-cap") private var replicatorByRole = Map.empty[Option[String], ActorRef] private def coordinatorSingletonManagerName(encName: String): String = @@ -717,7 +717,7 @@ private[pekko] class ClusterShardingGuardian extends Actor { private def replicatorSettings(shardingSettings: ClusterShardingSettings) = { val configuredSettings = - ReplicatorSettings(context.system.settings.config.getConfig("akka.cluster.sharding.distributed-data")) + ReplicatorSettings(context.system.settings.config.getConfig("pekko.cluster.sharding.distributed-data")) // Use members within the data center and with the given role (if any) val replicatorRoles = Set(ClusterSettings.DcRolePrefix + cluster.settings.SelfDataCenter) ++ shardingSettings.role val settingsWithRoles = configuredSettings.withRoles(replicatorRoles) diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheck.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheck.scala index 23fccb7aef..d633c788ee 100644 --- 
a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheck.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheck.scala @@ -54,7 +54,7 @@ final class ClusterShardingHealthCheck private[pekko] ( def this(system: ActorSystem) = this( system, - ClusterShardingHealthCheckSettings(system.settings.config.getConfig("akka.cluster.sharding.healthcheck")), + ClusterShardingHealthCheckSettings(system.settings.config.getConfig("pekko.cluster.sharding.healthcheck")), name => ClusterSharding(system).shardRegion(name)) private implicit val timeout: Timeout = settings.timeout diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettings.scala index 1882204bc1..e81484e7a3 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettings.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettings.scala @@ -45,14 +45,14 @@ object ClusterShardingSettings { /** * Create settings from the default configuration - * `akka.cluster.sharding`. + * `pekko.cluster.sharding`. */ def apply(system: ActorSystem): ClusterShardingSettings = - apply(system.settings.config.getConfig("akka.cluster.sharding")) + apply(system.settings.config.getConfig("pekko.cluster.sharding")) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.sharding`. + * the default configuration `pekko.cluster.sharding`. */ def apply(config: Config): ClusterShardingSettings = { @@ -115,13 +115,13 @@ object ClusterShardingSettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.sharding`. + * `pekko.cluster.sharding`. 
*/ def create(system: ActorSystem): ClusterShardingSettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.sharding`. + * the default configuration `pekko.cluster.sharding`. */ def create(config: Config): ClusterShardingSettings = apply(config) diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckSharding.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckSharding.scala index bb56e12e6c..6981434c14 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckSharding.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckSharding.scala @@ -19,7 +19,7 @@ import pekko.cluster.{ ConfigValidation, JoinConfigCompatChecker } final class JoinConfigCompatCheckSharding extends JoinConfigCompatChecker { override def requiredKeys: im.Seq[String] = - im.Seq("akka.cluster.sharding.state-store-mode") + im.Seq("pekko.cluster.sharding.state-store-mode") override def check(toCheck: Config, actualConfig: Config): ConfigValidation = JoinConfigCompatChecker.fullMatch(requiredKeys, toCheck, actualConfig) diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingData.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingData.scala index a0e094cea7..024e3ec90a 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingData.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingData.scala @@ -70,7 +70,7 @@ object RemoveInternalClusterShardingData { if (typeNames.isEmpty) println("Specify the Cluster Sharding type names to remove in program arguments") else { - val journalPluginId = 
system.settings.config.getString("akka.cluster.sharding.journal-plugin-id") + val journalPluginId = system.settings.config.getString("pekko.cluster.sharding.journal-plugin-id") import system.dispatcher remove(system, journalPluginId, typeNames, remove2dot3Data).onComplete { _ => system.terminate() @@ -90,9 +90,9 @@ object RemoveInternalClusterShardingData { remove2dot3Data: Boolean): Future[Unit] = { val resolvedJournalPluginId = - if (journalPluginId == "") system.settings.config.getString("akka.persistence.journal.plugin") + if (journalPluginId == "") system.settings.config.getString("pekko.persistence.journal.plugin") else journalPluginId - if (resolvedJournalPluginId == "akka.persistence.journal.leveldb-shared") { + if (resolvedJournalPluginId == "pekko.persistence.journal.leveldb-shared") { @nowarn("msg=deprecated") val store = system.actorOf(Props[SharedLeveldbStore](), "store") SharedLeveldbJournal.setStore(store, system) diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/Shard.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/Shard.scala index e823b44b84..f9fb9a88aa 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/Shard.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/Shard.scala @@ -432,7 +432,7 @@ private[pekko] class Shard( import pekko.cluster.sharding.ShardCoordinator.Internal.CoordinatorMessage - private val verboseDebug = context.system.settings.config.getBoolean("akka.cluster.sharding.verbose-debug-logging") + private val verboseDebug = context.system.settings.config.getBoolean("pekko.cluster.sharding.verbose-debug-logging") private val rememberEntitiesStore: Option[ActorRef] = rememberEntitiesProvider.map { provider => @@ -448,7 +448,7 @@ private[pekko] class Shard( @InternalStableApi private val entities = { val failOnInvalidStateTransition = - 
context.system.settings.config.getBoolean("akka.cluster.sharding.fail-on-invalid-entity-state-transition") + context.system.settings.config.getBoolean("pekko.cluster.sharding.fail-on-invalid-entity-state-transition") new Entities(log, settings.rememberEntities, verboseDebug, failOnInvalidStateTransition) } diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardCoordinator.scala index d8a4a01e69..654bd06752 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardCoordinator.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardCoordinator.scala @@ -661,7 +661,7 @@ abstract class ShardCoordinator( import settings.tuningParameters._ val log = Logging.withMarker(context.system, this) - private val verboseDebug = context.system.settings.config.getBoolean("akka.cluster.sharding.verbose-debug-logging") + private val verboseDebug = context.system.settings.config.getBoolean("pekko.cluster.sharding.verbose-debug-logging") private val ignoreRef = context.system.asInstanceOf[ExtendedActorSystem].provider.ignoreRef val cluster = Cluster(context.system) @@ -1307,7 +1307,7 @@ class PersistentShardCoordinator( import ShardCoordinator.Internal._ import settings.tuningParameters._ - private val verboseDebug = context.system.settings.config.getBoolean("akka.cluster.sharding.verbose-debug-logging") + private val verboseDebug = context.system.settings.config.getBoolean("pekko.cluster.sharding.verbose-debug-logging") override def persistenceId = s"/sharding/${typeName}Coordinator" @@ -1460,7 +1460,7 @@ private[pekko] class DDataShardCoordinator( import pekko.cluster.ddata.Replicator.Update - private val verboseDebug = context.system.settings.config.getBoolean("akka.cluster.sharding.verbose-debug-logging") + private val verboseDebug = 
context.system.settings.config.getBoolean("pekko.cluster.sharding.verbose-debug-logging") private val stateReadConsistency = settings.tuningParameters.coordinatorStateReadMajorityPlus match { case Int.MaxValue => ReadAll(settings.tuningParameters.waitingForStateTimeout) diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala index f7a3857633..15f3ab05f4 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala @@ -625,7 +625,7 @@ private[pekko] class ShardRegion( val cluster = Cluster(context.system) - private val verboseDebug = context.system.settings.config.getBoolean("akka.cluster.sharding.verbose-debug-logging") + private val verboseDebug = context.system.settings.config.getBoolean("pekko.cluster.sharding.verbose-debug-logging") // sort by age, oldest first val ageOrdering = Member.ageOrdering @@ -681,8 +681,8 @@ private[pekko] class ShardRegion( private def logPassivationStrategy(): Unit = { if (settings.passivationStrategySettings.oldSettingUsed) { log.warning( - "The `akka.cluster.sharding.passivate-idle-entity-after` setting and associated methods are deprecated. " + - "Use the `akka.cluster.sharding.passivation.default-idle-strategy.idle-entity.timeout` setting instead. " + + "The `pekko.cluster.sharding.passivate-idle-entity-after` setting and associated methods are deprecated. " + + "Use the `pekko.cluster.sharding.passivation.default-idle-strategy.idle-entity.timeout` setting instead. 
" + "See the documentation and reference config for more information on automatic passivation strategies.") } if (settings.rememberEntities) { diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala index e9b0d02f8c..afe2d7e299 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala @@ -56,7 +56,7 @@ final private[external] class ExternalShardAllocationClientImpl(system: ActorSys private val timeout = system.settings.config - .getDuration("akka.cluster.sharding.external-shard-allocation-strategy.client-timeout") + .getDuration("pekko.cluster.sharding.external-shard-allocation-strategy.client-timeout") .asScala private implicit val askTimeout: Timeout = Timeout(timeout * 2) private implicit val ec: MessageDispatcher = system.dispatchers.internalDispatcher diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/CustomStateStoreModeProvider.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/CustomStateStoreModeProvider.scala index 2faf1bb419..ef70725dc1 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/CustomStateStoreModeProvider.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/CustomStateStoreModeProvider.scala @@ -26,8 +26,8 @@ private[pekko] final class CustomStateStoreModeProvider( private val log = Logging(system, classOf[CustomStateStoreModeProvider]) log.warning("Using custom remember entities store for [{}], not intended for production use.", typeName) - val customStore = 
if (system.settings.config.hasPath("akka.cluster.sharding.remember-entities-custom-store")) { - val customClassName = system.settings.config.getString("akka.cluster.sharding.remember-entities-custom-store") + val customStore = if (system.settings.config.hasPath("pekko.cluster.sharding.remember-entities-custom-store")) { + val customClassName = system.settings.config.getString("pekko.cluster.sharding.remember-entities-custom-store") val store = system .asInstanceOf[ExtendedActorSystem] diff --git a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala index b934887f98..ffa04ee4fb 100644 --- a/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala +++ b/akka-cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala @@ -74,7 +74,7 @@ private[pekko] final class EventSourcedRememberEntitiesShardStore( import settings.tuningParameters._ private val maxUpdatesPerWrite = context.system.settings.config - .getInt("akka.cluster.sharding.event-sourced-remember-entities-store.max-updates-per-write") + .getInt("pekko.cluster.sharding.event-sourced-remember-entities-store.max-updates-per-write") log.debug("Starting up EventSourcedRememberEntitiesStore") private var state = State() diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala index c06bfeda70..b4f3754229 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala +++ 
b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala @@ -46,7 +46,7 @@ object RandomizedSplitBrainResolverIntegrationSpec extends MultiNodeConfig { val node9 = role("node9") commonConfig(ConfigFactory.parseString(s""" - akka { + pekko { loglevel = INFO cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" @@ -80,8 +80,8 @@ object RandomizedSplitBrainResolverIntegrationSpec extends MultiNodeConfig { test.random-seed = ${System.currentTimeMillis()} - akka.testconductor.barrier-timeout = 120 s - akka.cluster.run-coordinated-shutdown-when-down = off + pekko.testconductor.barrier-timeout = 120 s + pekko.cluster.run-coordinated-shutdown-when-down = off """)) testTransport(on = true) @@ -385,13 +385,13 @@ class RandomizedSplitBrainResolverIntegrationSpec } - private val leaseMajorityConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver { + private val leaseMajorityConfig = ConfigFactory.parseString("""pekko.cluster.split-brain-resolver { active-strategy = lease-majority }""") case class Scenario(cfg: Config, numberOfNodes: Int) { - val activeStrategy: String = cfg.getString("akka.cluster.split-brain-resolver.active-strategy") + val activeStrategy: String = cfg.getString("pekko.cluster.split-brain-resolver.active-strategy") override def toString: String = s"Scenario($activeStrategy, $numberOfNodes)" @@ -407,7 +407,7 @@ class RandomizedSplitBrainResolverIntegrationSpec for (scenario <- scenarios) { scenario.toString taggedAs LongRunningTest in { // temporarily disabled for aeron-udp in multi-node: https://github.com/akka/akka/pull/30706/ - val arteryConfig = system.settings.config.getConfig("akka.remote.artery") + val arteryConfig = system.settings.config.getConfig("pekko.remote.artery") if (arteryConfig.getInt("canonical.port") == 6000 && arteryConfig.getString("transport") == "aeron-udp") { pending diff --git 
a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverIntegrationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverIntegrationSpec.scala index 88bdec8042..dcfbbe4a5a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverIntegrationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverIntegrationSpec.scala @@ -49,7 +49,7 @@ object SplitBrainResolverIntegrationSpec extends MultiNodeConfig { val node9 = role("node9") commonConfig(ConfigFactory.parseString(""" - akka { + pekko { loglevel = INFO cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" @@ -63,9 +63,9 @@ object SplitBrainResolverIntegrationSpec extends MultiNodeConfig { remote.log-remote-lifecycle-events = off } - akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off - akka.coordinated-shutdown.terminate-actor-system = off - akka.cluster.run-coordinated-shutdown-when-down = off + pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = off + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.cluster.run-coordinated-shutdown-when-down = off """)) testTransport(on = true) @@ -122,7 +122,7 @@ class SplitBrainResolverIntegrationSpec system.name + "-" + c, MultiNodeSpec.configureNextPortIfFixed( scenario.cfg - .withValue("akka.cluster.multi-data-center.self-data-center", ConfigValueFactory.fromAnyRef(dcName)) + .withValue("pekko.cluster.multi-data-center.self-data-center", ConfigValueFactory.fromAnyRef(dcName)) .withFallback(system.settings.config))) val gremlinController = sys.actorOf(GremlinController.props, "gremlinController") system.actorOf(GremlinControllerProxy.props(gremlinController), s"gremlinControllerProxy-$c") @@ -378,21 +378,21 @@ class SplitBrainResolverIntegrationSpec } - private val staticQuorumConfig = 
ConfigFactory.parseString("""akka.cluster.split-brain-resolver { + private val staticQuorumConfig = ConfigFactory.parseString("""pekko.cluster.split-brain-resolver { active-strategy = static-quorum static-quorum.quorum-size = 5 }""") - private val keepMajorityConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver { + private val keepMajorityConfig = ConfigFactory.parseString("""pekko.cluster.split-brain-resolver { active-strategy = keep-majority }""") - private val keepOldestConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver { + private val keepOldestConfig = ConfigFactory.parseString("""pekko.cluster.split-brain-resolver { active-strategy = keep-oldest }""") - private val downAllConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver { + private val downAllConfig = ConfigFactory.parseString("""pekko.cluster.split-brain-resolver { active-strategy = down-all }""") - private val leaseMajorityConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver { + private val leaseMajorityConfig = ConfigFactory.parseString("""pekko.cluster.split-brain-resolver { active-strategy = lease-majority lease-majority { lease-implementation = test-lease @@ -425,7 +425,7 @@ class SplitBrainResolverIntegrationSpec dcDecider: RoleName => DataCenter = defaultDcDecider // allows to set the dc per indexed node ) { - val activeStrategy: String = cfg.getString("akka.cluster.split-brain-resolver.active-strategy") + val activeStrategy: String = cfg.getString("pekko.cluster.split-brain-resolver.active-strategy") override def toString: String = { s"$expected when using $activeStrategy and side1=$side1Size and side2=$side2Size" + @@ -461,7 +461,7 @@ class SplitBrainResolverIntegrationSpec for (scenario <- scenarios) { scenario.toString taggedAs LongRunningTest in { // temporarily disabled for aeron-udp in multi-node: https://github.com/akka/akka/pull/30706/ - val arteryConfig = system.settings.config.getConfig("akka.remote.artery") + 
val arteryConfig = system.settings.config.getConfig("pekko.remote.artery") if (arteryConfig.getInt("canonical.port") == 6000 && arteryConfig.getString("transport") == "aeron-udp") { pending diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala index 9ce01773f1..5842b0066b 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala @@ -52,10 +52,10 @@ abstract class ClusterShardCoordinatorDowning2SpecConfig(mode: String) mode, loglevel = "INFO", additionalConfig = """ - akka.cluster.sharding.rebalance-interval = 120 s + pekko.cluster.sharding.rebalance-interval = 120 s # setting down-removal-margin, for testing of issue #29131 - akka.cluster.down-removal-margin = 3 s - akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 3s + pekko.cluster.down-removal-margin = 3 s + pekko.remote.watch-failure-detector.acceptable-heartbeat-pause = 3s """) { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala index c1522b7585..9d9b95f54e 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala @@ -52,10 +52,10 @@ abstract class ClusterShardCoordinatorDowningSpecConfig(mode: String) mode, loglevel = "INFO", additionalConfig = """ - 
akka.cluster.sharding.rebalance-interval = 120 s + pekko.cluster.sharding.rebalance-interval = 120 s # setting down-removal-margin, for testing of issue #29131 - akka.cluster.down-removal-margin = 3 s - akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 3s + pekko.cluster.down-removal-margin = 3 s + pekko.remote.watch-failure-detector.acceptable-heartbeat-pause = 3s """) { val controller = role("controller") val first = role("first") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala index 4796ecb75d..ba2060f28c 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala @@ -65,8 +65,8 @@ abstract class ClusterShardingCustomShardAllocationSpecConfig(mode: String) extends MultiNodeClusterShardingConfig( mode, additionalConfig = s""" - akka.cluster.sharding.rebalance-interval = 1 s - akka.persistence.journal.leveldb-shared.store.native = off + pekko.cluster.sharding.rebalance-interval = 1 s + pekko.persistence.journal.leveldb-shared.store.native = off """) { val first = role("first") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingFailureSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingFailureSpec.scala index cd95368ed6..d683df46f4 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingFailureSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingFailureSpec.scala @@ -55,18 +55,18 @@ abstract class 
ClusterShardingFailureSpecConfig(override val mode: String) extends MultiNodeClusterShardingConfig( mode, additionalConfig = s""" - akka.loglevel=DEBUG - akka.cluster.roles = ["backend"] - akka.cluster.sharding { + pekko.loglevel=DEBUG + pekko.cluster.roles = ["backend"] + pekko.cluster.sharding { coordinator-failure-backoff = 3s shard-failure-backoff = 3s } # don't leak ddata state across runs - akka.cluster.sharding.distributed-data.durable.keys = [] - akka.persistence.journal.leveldb-shared.store.native = off + pekko.cluster.sharding.distributed-data.durable.keys = [] + pekko.persistence.journal.leveldb-shared.store.native = off # using Java serialization for these messages because test is sending them # to other nodes, which isn't normal usage. - akka.actor.serialization-bindings { + pekko.actor.serialization-bindings { "${classOf[ShardRegion.Passivate].getName}" = java-test } """, diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStateSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStateSpec.scala index 2640660a03..07061c5490 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStateSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStateSpec.scala @@ -32,7 +32,7 @@ object ClusterShardingGetStateSpec { } object ClusterShardingGetStateSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = s""" - akka.cluster.sharding { + pekko.cluster.sharding { coordinator-failure-backoff = 3s shard-failure-backoff = 3s } @@ -42,7 +42,7 @@ object ClusterShardingGetStateSpecConfig extends MultiNodeClusterShardingConfig( val first = role("first") val second = role("second") - nodeConfig(first, second)(ConfigFactory.parseString("""akka.cluster.roles=["shard"]""")) + nodeConfig(first, 
second)(ConfigFactory.parseString("""pekko.cluster.roles=["shard"]""")) } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStatsSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStatsSpec.scala index eaa2bf0dd6..b180c5ddf5 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStatsSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGetStatsSpec.scala @@ -32,9 +32,9 @@ object ClusterShardingGetStatsSpec { object ClusterShardingGetStatsSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ - akka.log-dead-letters-during-shutdown = off - akka.cluster.sharding.updating-state-timeout = 2s - akka.cluster.sharding.waiting-for-state-timeout = 2s + pekko.log-dead-letters-during-shutdown = off + pekko.cluster.sharding.updating-state-timeout = 2s + pekko.cluster.sharding.waiting-for-state-timeout = 2s """) { val controller = role("controller") @@ -42,7 +42,7 @@ object ClusterShardingGetStatsSpecConfig val second = role("second") val third = role("third") - nodeConfig(first, second, third)(ConfigFactory.parseString("""akka.cluster.roles=["shard"]""")) + nodeConfig(first, second, third)(ConfigFactory.parseString("""pekko.cluster.roles=["shard"]""")) } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownOldestSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownOldestSpec.scala index 561b66086b..e30c9e5a43 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownOldestSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownOldestSpec.scala @@ -16,7 +16,7 @@ import 
scala.concurrent.Await abstract class ClusterShardingGracefulShutdownOldestSpecConfig(mode: String) extends MultiNodeClusterShardingConfig( mode, - additionalConfig = "akka.persistence.journal.leveldb-shared.store.native = off") { + additionalConfig = "pekko.persistence.journal.leveldb-shared.store.native = off") { val first = role("first") val second = role("second") } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala index 9c0e7456ed..2bb1140a5d 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala @@ -21,11 +21,11 @@ abstract class ClusterShardingGracefulShutdownSpecConfig(mode: String) mode, additionalConfig = """ - akka.loglevel = info - akka.persistence.journal.leveldb-shared.store.native = off + pekko.loglevel = info + pekko.persistence.journal.leveldb-shared.store.native = off # We set this high to allow pausing coordinated shutdown make sure the handoff completes 'immediately' and not # relies on the member removal, which could make things take longer then necessary - akka.coordinated-shutdown.phases.cluster-sharding-shutdown-region.timeout = 60s + pekko.coordinated-shutdown.phases.cluster-sharding-shutdown-region.timeout = 60s """) { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala index 8880f469a0..c61472fca1 100644 --- 
a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala @@ -8,7 +8,7 @@ import org.apache.pekko.testkit._ object ClusterShardingIncorrectSetupSpecConfig extends MultiNodeClusterShardingConfig( - additionalConfig = "akka.cluster.sharding.waiting-for-state-timeout = 100ms") { + additionalConfig = "pekko.cluster.sharding.waiting-for-state-timeout = 100ms") { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeavingSpec.scala index 6e7d6898f7..79168cddbb 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeavingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeavingSpec.scala @@ -50,11 +50,11 @@ abstract class ClusterShardingLeavingSpecConfig(mode: String) loglevel = "DEBUG", additionalConfig = """ - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.rebalance-interval = 1s # make rebalancing more likely to happen to test for https://github.com/akka/akka/issues/29093 - akka.cluster.sharding.distributed-data.majority-min-cap = 1 - akka.cluster.sharding.coordinator-state.write-majority-plus = 1 - akka.cluster.sharding.coordinator-state.read-majority-plus = 1 + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.rebalance-interval = 1s # make rebalancing more likely to happen to test for https://github.com/akka/akka/issues/29093 + pekko.cluster.sharding.distributed-data.majority-min-cap = 1 + pekko.cluster.sharding.coordinator-state.write-majority-plus = 1 + pekko.cluster.sharding.coordinator-state.read-majority-plus = 1 
""") { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingMinMembersSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingMinMembersSpec.scala index 0d782d6bf1..83d8305b6d 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingMinMembersSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingMinMembersSpec.scala @@ -18,8 +18,8 @@ abstract class ClusterShardingMinMembersSpecConfig(mode: String) extends MultiNodeClusterShardingConfig( mode, additionalConfig = s""" - akka.cluster.sharding.rebalance-interval = 120s #disable rebalance - akka.cluster.min-nr-of-members = 3 + pekko.cluster.sharding.rebalance-interval = 120s #disable rebalance + pekko.cluster.min-nr-of-members = 3 """) { val first = role("first") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingQueriesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingQueriesSpec.scala index 944168de07..9f28a18e0f 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingQueriesSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingQueriesSpec.scala @@ -30,8 +30,8 @@ object ClusterShardingQueriesSpec { } object ClusterShardingQueriesSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = s""" - akka.log-dead-letters-during-shutdown = off - akka.cluster.sharding { + pekko.log-dead-letters-during-shutdown = off + pekko.cluster.sharding { shard-region-query-timeout = 2ms updating-state-timeout = 2s waiting-for-state-timeout = 2s @@ -43,10 +43,10 @@ object ClusterShardingQueriesSpecConfig extends MultiNodeClusterShardingConfig(a val second = role("second") 
val third = role("third") - val shardRoles = ConfigFactory.parseString("""akka.cluster.roles=["shard"]""") + val shardRoles = ConfigFactory.parseString("""pekko.cluster.roles=["shard"]""") nodeConfig(busy)( - ConfigFactory.parseString("akka.cluster.sharding.shard-region-query-timeout = 0ms").withFallback(shardRoles)) + ConfigFactory.parseString("pekko.cluster.sharding.shard-region-query-timeout = 0ms").withFallback(shardRoles)) nodeConfig(second, third)(shardRoles) } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala index 6b10b40116..4406c76d2a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala @@ -53,24 +53,24 @@ object ClusterShardingRememberEntitiesNewExtractorSpec { abstract class ClusterShardingRememberEntitiesNewExtractorSpecConfig(mode: String) extends MultiNodeClusterShardingConfig( mode, - additionalConfig = "akka.persistence.journal.leveldb-shared.store.native = off") { + additionalConfig = "pekko.persistence.journal.leveldb-shared.store.native = off") { val first = role("first") val second = role("second") val third = role("third") val roleConfig = ConfigFactory.parseString(""" - akka.cluster.roles = [sharding] + pekko.cluster.roles = [sharding] """) // we pretend node 4 and 5 are new incarnations of node 2 and 3 as they never run in parallel // so we can use the same lmdb store for them and have node 4 pick up the persisted data of node 2 val ddataNodeAConfig = ConfigFactory.parseString(""" - akka.cluster.sharding.distributed-data.durable.lmdb { + pekko.cluster.sharding.distributed-data.durable.lmdb { dir = 
target/ShardingRememberEntitiesNewExtractorSpec/sharding-node-a } """) val ddataNodeBConfig = ConfigFactory.parseString(""" - akka.cluster.sharding.distributed-data.durable.lmdb { + pekko.cluster.sharding.distributed-data.durable.lmdb { dir = target/ShardingRememberEntitiesNewExtractorSpec/sharding-node-b } """) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala index d0cac2efa9..dffd8edff8 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala @@ -39,9 +39,9 @@ abstract class ClusterShardingRememberEntitiesSpecConfig( rememberEntities, rememberEntitiesStore = rememberEntitiesStore, additionalConfig = s""" - akka.testconductor.barrier-timeout = 60 s - akka.test.single-expect-default = 60 s - akka.persistence.journal.leveldb-shared.store.native = off + pekko.testconductor.barrier-timeout = 60 s + pekko.test.single-expect-default = 60 s + pekko.persistence.journal.leveldb-shared.store.native = off """) { val first = role("first") @@ -49,7 +49,7 @@ abstract class ClusterShardingRememberEntitiesSpecConfig( val third = role("third") nodeConfig(third)(ConfigFactory.parseString(s""" - akka.cluster.sharding.distributed-data.durable.lmdb { + pekko.cluster.sharding.distributed-data.durable.lmdb { # use same directory when starting new node on third (not used at same time) dir = $targetDir/sharding-third } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRolePartitioningSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRolePartitioningSpec.scala index 8c64e3cb59..1051a5afc6 
100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRolePartitioningSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingRolePartitioningSpec.scala @@ -46,14 +46,14 @@ abstract class ClusterShardingMinMembersPerRoleConfig extends MultiNodeClusterSh val fourth = role("fourth") val fifth = role("fifth") - val r1Config: Config = ConfigFactory.parseString("""akka.cluster.roles = [ "R1" ]""") - val r2Config: Config = ConfigFactory.parseString("""akka.cluster.roles = [ "R2" ]""") + val r1Config: Config = ConfigFactory.parseString("""pekko.cluster.roles = [ "R1" ]""") + val r2Config: Config = ConfigFactory.parseString("""pekko.cluster.roles = [ "R2" ]""") } object ClusterShardingMinMembersPerRoleNotConfiguredConfig extends ClusterShardingMinMembersPerRoleConfig { - val commonRoleConfig: Config = ConfigFactory.parseString("akka.cluster.min-nr-of-members = 2") + val commonRoleConfig: Config = ConfigFactory.parseString("pekko.cluster.min-nr-of-members = 2") nodeConfig(first, second, third)(r1Config.withFallback(commonRoleConfig)) @@ -64,9 +64,9 @@ object ClusterShardingMinMembersPerRoleConfiguredConfig extends ClusterShardingM val commonRoleConfig = ConfigFactory.parseString(""" - akka.cluster.min-nr-of-members = 3 - akka.cluster.role.R1.min-nr-of-members = 3 - akka.cluster.role.R2.min-nr-of-members = 2 + pekko.cluster.min-nr-of-members = 3 + pekko.cluster.role.R1.min-nr-of-members = 3 + pekko.cluster.role.R2.min-nr-of-members = 2 """) nodeConfig(first, second, third)(r1Config.withFallback(commonRoleConfig)) @@ -108,7 +108,7 @@ abstract class ClusterShardingRolePartitioningSpec(multiNodeConfig: ClusterShard "Cluster Sharding with roles" must { - "start the cluster, await convergence, init sharding on every node: 2 data types, 'akka.cluster.min-nr-of-members=2', partition shard location by 2 roles" in { + "start the cluster, await convergence, init sharding on every 
node: 2 data types, 'pekko.cluster.min-nr-of-members=2', partition shard location by 2 roles" in { // start sharding early startSharding( system, diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala index 2d26f6ba91..8a6f91c4de 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala @@ -16,7 +16,7 @@ import pekko.testkit._ * one-to-one mapping between shards and entities is not efficient but some use that anyway */ object ClusterShardingSingleShardPerEntitySpecConfig - extends MultiNodeClusterShardingConfig(additionalConfig = "akka.cluster.sharding.updating-state-timeout = 1s") { + extends MultiNodeClusterShardingConfig(additionalConfig = "pekko.cluster.sharding.updating-state-timeout = 1s") { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSpec.scala index d0a2775067..d04e4706c7 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ClusterShardingSpec.scala @@ -134,15 +134,15 @@ abstract class ClusterShardingSpecConfig( * mode, then leverage the common config and fallbacks after these specific test configs: */ commonConfig(ConfigFactory.parseString(s""" - akka.loglevel = "DEBUG" + pekko.loglevel = "DEBUG" - akka.cluster.sharding.verbose-debug-logging = on - akka.loggers = 
["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.cluster.sharding.verbose-debug-logging = on + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.cluster.roles = ["backend"] - akka.cluster.distributed-data.gossip-interval = 1s - akka.persistence.journal.leveldb-shared.timeout = 10s #the original default, base test uses 5s - akka.cluster.sharding { + pekko.cluster.roles = ["backend"] + pekko.cluster.distributed-data.gossip-interval = 1s + pekko.persistence.journal.leveldb-shared.timeout = 10s #the original default, base test uses 5s + pekko.cluster.sharding { retry-interval = 1 s handoff-timeout = 10 s shard-start-timeout = 5s @@ -158,11 +158,11 @@ abstract class ClusterShardingSpecConfig( rebalance-relative-limit = 1.0 } } - akka.testconductor.barrier-timeout = 70s + pekko.testconductor.barrier-timeout = 70s # using Java serialization for the messages here because would be to much (unrelated) # to show full Jackson serialization in docs (requires annotations because of envelope and such) - akka.actor.serialization-bindings { + pekko.actor.serialization-bindings { "${ClusterShardingSpec.Increment.getClass.getName}" = java-test "${ClusterShardingSpec.Decrement.getClass.getName}" = java-test "${classOf[ClusterShardingSpec.Get].getName}" = java-test @@ -176,7 +176,7 @@ abstract class ClusterShardingSpecConfig( """).withFallback(MultiNodeClusterShardingConfig.persistenceConfig(targetDir)).withFallback(common)) nodeConfig(sixth) { - ConfigFactory.parseString("""akka.cluster.roles = ["frontend"]""") + ConfigFactory.parseString("""pekko.cluster.roles = ["frontend"]""") } } @@ -294,7 +294,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) "replicator") def ddataRememberEntitiesProvider(typeName: String) = { - val majorityMinCap = system.settings.config.getInt("akka.cluster.sharding.distributed-data.majority-min-cap") + val majorityMinCap = 
system.settings.config.getInt("pekko.cluster.sharding.distributed-data.majority-min-cap") new DDataRememberEntitiesProvider(typeName, settings, majorityMinCap, replicator) } @@ -311,13 +311,13 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) handoff-timeout = 10s shard-start-timeout = 10s rebalance-interval = ${if (rebalanceEnabled) "2s" else "3600s"} - """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) + """).withFallback(system.settings.config.getConfig("pekko.cluster.sharding")) val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities) if (settings.stateStoreMode == "persistence") ShardCoordinator.props(typeName, settings, allocationStrategy) else { - val majorityMinCap = system.settings.config.getInt("akka.cluster.sharding.distributed-data.majority-min-cap") + val majorityMinCap = system.settings.config.getInt("pekko.cluster.sharding.distributed-data.majority-min-cap") val rememberEntitiesStore = // only store provider if ddata for now, persistence uses all-in-one-coordinator if (settings.rememberEntities) Some(ddataRememberEntitiesProvider(typeName)) @@ -367,7 +367,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) shard-failure-backoff = 1s entity-restart-backoff = 1s buffer-size = 1000 - """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) + """).withFallback(system.settings.config.getConfig("pekko.cluster.sharding")) val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities) val rememberEntitiesProvider = if (!rememberEntities) None @@ -505,7 +505,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) val cfg = ConfigFactory.parseString(""" retry-interval = 1s buffer-size = 1000 - """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) + """).withFallback(system.settings.config.getConfig("pekko.cluster.sharding")) val settings = 
ClusterShardingSettings(cfg) val proxy = system.actorOf( ShardRegion.proxyProps( diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ExternalShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ExternalShardAllocationSpec.scala index 120980e2c0..abcbac04fe 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ExternalShardAllocationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/ExternalShardAllocationSpec.scala @@ -19,7 +19,7 @@ import pekko.testkit.{ ImplicitSender, TestProbe } object ExternalShardAllocationSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ - akka.cluster.sharding { + pekko.cluster.sharding { retry-interval = 2000ms waiting-for-state-timeout = 2000ms rebalance-interval = 1s diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiDcClusterShardingSpec.scala index a4424c50d8..9f09486305 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiDcClusterShardingSpec.scala @@ -50,12 +50,12 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeClusterShardingConfig( loglevel = "DEBUG", // issue #23741 additionalConfig = s""" - akka.cluster { + pekko.cluster { debug.verbose-heartbeat-logging = on debug.verbose-gossip-logging = on sharding.retry-interval = 200ms } - akka.remote.log-remote-lifecycle-events = on + pekko.remote.log-remote-lifecycle-events = on """) { val first = role("first") @@ -64,11 +64,11 @@ object MultiDcClusterShardingSpecConfig val fourth = role("fourth") nodeConfig(first, second) { - 
ConfigFactory.parseString("akka.cluster.multi-data-center.self-data-center = DC1") + ConfigFactory.parseString("pekko.cluster.multi-data-center.self-data-center = DC1") } nodeConfig(third, fourth) { - ConfigFactory.parseString("akka.cluster.multi-data-center.self-data-center = DC2") + ConfigFactory.parseString("pekko.cluster.multi-data-center.self-data-center = DC2") } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingConfig.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingConfig.scala index 498c001e5d..17511a7d04 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingConfig.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingConfig.scala @@ -59,16 +59,16 @@ object MultiNodeClusterShardingConfig { def persistenceConfig(targetDir: String): Config = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb-shared" - akka.persistence.journal.leveldb-shared { + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb-shared" + pekko.persistence.journal.leveldb-shared { timeout = 5s store { native = off dir = "$targetDir/journal" } } - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "$targetDir/snapshots" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "$targetDir/snapshots" """) } @@ -106,19 +106,19 @@ abstract class MultiNodeClusterShardingConfig( val common: Config = ConfigFactory .parseString(s""" - akka.actor.provider = "cluster" - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s - akka.cluster.sharding.state-store-mode = 
"$mode" - akka.cluster.sharding.remember-entities = $rememberEntities - akka.cluster.sharding.remember-entities-store = "$rememberEntitiesStore" - akka.cluster.sharding.distributed-data.durable.lmdb { + pekko.actor.provider = "cluster" + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s + pekko.cluster.sharding.state-store-mode = "$mode" + pekko.cluster.sharding.remember-entities = $rememberEntities + pekko.cluster.sharding.remember-entities-store = "$rememberEntitiesStore" + pekko.cluster.sharding.distributed-data.durable.lmdb { dir = $targetDir/sharding-ddata map-size = 10 MiB } - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on - akka.loglevel = $loglevel - akka.remote.log-remote-lifecycle-events = off + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.loglevel = $loglevel + pekko.remote.log-remote-lifecycle-events = off """) .withFallback(SharedLeveldbJournal.configToEnableJavaSerializationForTest) .withFallback(MultiNodeClusterSpec.clusterConfig) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingSpec.scala index 9f3e268f6d..b09eb9d370 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/MultiNodeClusterShardingSpec.scala @@ -92,7 +92,8 @@ abstract class MultiNodeClusterShardingSpec(val config: MultiNodeClusterSharding ClusterSharding(system).defaultShardAllocationStrategy(settings) protected lazy val storageLocations = List( - new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + new File( + 
system.settings.config.getString("pekko.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override def expectedTestDuration = 120.seconds @@ -175,7 +176,7 @@ abstract class MultiNodeClusterShardingSpec(val config: MultiNodeClusterSharding mode == ClusterShardingSettings.StateStoreModePersistence || system.settings.config .getString( - "akka.cluster.sharding.remember-entities-store") == ClusterShardingSettings.RememberEntitiesStoreEventsourced + "pekko.cluster.sharding.remember-entities-store") == ClusterShardingSettings.RememberEntitiesStoreEventsourced protected def setStoreIfNeeded(sys: ActorSystem, storeOn: RoleName): Unit = if (persistenceIsNeeded) setStore(sys, storeOn) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/RollingUpdateShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/RollingUpdateShardAllocationSpec.scala index 7b8b58f5d4..6911419218 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/RollingUpdateShardAllocationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/org/apache/pekko/cluster/sharding/RollingUpdateShardAllocationSpec.scala @@ -19,15 +19,15 @@ import scala.concurrent.duration._ object RollingUpdateShardAllocationSpecConfig extends MultiNodeClusterShardingConfig( additionalConfig = """ - akka.cluster.sharding { + pekko.cluster.sharding { # speed up forming and handovers a bit retry-interval = 500ms waiting-for-state-timeout = 500ms rebalance-interval = 1s # we are leaving cluster nodes but they need to stay in test - akka.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.terminate-actor-system = off # use the new LeastShardAllocationStrategy - akka.cluster.sharding.least-shard-allocation-strategy.rebalance-absolute-limit = 1 + pekko.cluster.sharding.least-shard-allocation-strategy.rebalance-absolute-limit = 1 } """) { @@ -37,11 +37,11 @@ object 
RollingUpdateShardAllocationSpecConfig val fourth = role("fourth") nodeConfig(first, second)(ConfigFactory.parseString(""" - akka.cluster.app-version = 1.0.0 + pekko.cluster.app-version = 1.0.0 """)) nodeConfig(third, fourth)(ConfigFactory.parseString(""" - akka.cluster.app-version = 1.0.1 + pekko.cluster.app-version = 1.0.1 """)) } diff --git a/akka-cluster-sharding/src/test/resources/adaptivity-trace.conf b/akka-cluster-sharding/src/test/resources/adaptivity-trace.conf index d2ad0a9275..e6a110924c 100644 --- a/akka-cluster-sharding/src/test/resources/adaptivity-trace.conf +++ b/akka-cluster-sharding/src/test/resources/adaptivity-trace.conf @@ -39,7 +39,7 @@ corda-traces=${?CORDA_TRACES} lirs-traces="lirs-traces" lirs-traces=${?LIRS_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -228,15 +228,15 @@ akka.cluster.sharding { } } - lru-fs-slru-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 500 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 500 } } diff --git a/akka-cluster-sharding/src/test/resources/arc-trace-database.conf b/akka-cluster-sharding/src/test/resources/arc-trace-database.conf index 4095a31171..d5f7da253b 100644 --- a/akka-cluster-sharding/src/test/resources/arc-trace-database.conf +++ b/akka-cluster-sharding/src/test/resources/arc-trace-database.conf @@ -88,7 +88,7 @@ arc-traces="arc-traces" arc-traces=${?ARC_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -595,39 
+595,39 @@ akka.cluster.sharding { } } - lru-fs-slru-100k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-100k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 100000 } - lru-fs-slru-200k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-200k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 200000 } - lru-fs-slru-400k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-400k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 400000 } - lru-fs-slru-800k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-800k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 800000 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-100k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-100k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 100000 } - lru-fs-slru-hc-200k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-200k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 200000 } - lru-fs-slru-hc-400k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-400k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 400000 } - lru-fs-slru-hc-800k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-800k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 800000 } diff --git a/akka-cluster-sharding/src/test/resources/arc-trace-search.conf 
b/akka-cluster-sharding/src/test/resources/arc-trace-search.conf index 5e3e6b0032..4c9626b286 100644 --- a/akka-cluster-sharding/src/test/resources/arc-trace-search.conf +++ b/akka-cluster-sharding/src/test/resources/arc-trace-search.conf @@ -70,7 +70,7 @@ arc-traces="arc-traces" arc-traces=${?ARC_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -459,31 +459,31 @@ akka.cluster.sharding { } } - lru-fs-slru-25k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-25k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 25000 } - lru-fs-slru-50k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-50k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 50000 } - lru-fs-slru-100k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-100k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 100000 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-25k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-25k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 25000 } - lru-fs-slru-hc-50k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-50k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 50000 } - lru-fs-slru-hc-100k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-100k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 100000 } diff --git a/akka-cluster-sharding/src/test/resources/lirs-trace-glimpse.conf 
b/akka-cluster-sharding/src/test/resources/lirs-trace-glimpse.conf index d0e4eea34c..969940ad29 100644 --- a/akka-cluster-sharding/src/test/resources/lirs-trace-glimpse.conf +++ b/akka-cluster-sharding/src/test/resources/lirs-trace-glimpse.conf @@ -88,7 +88,7 @@ lirs-traces="lirs-traces" lirs-traces=${?LIRS_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -595,39 +595,39 @@ akka.cluster.sharding { } } - lru-fs-slru-250 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-250 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 250 } - lru-fs-slru-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 500 } - lru-fs-slru-1000 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-1000 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 1000 } - lru-fs-slru-1500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-1500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 1500 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-250 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-250 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 250 } - lru-fs-slru-hc-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 500 } - lru-fs-slru-hc-1000 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-1000 = 
${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 1000 } - lru-fs-slru-hc-1500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-1500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 1500 } diff --git a/akka-cluster-sharding/src/test/resources/lirs-trace-multi.conf b/akka-cluster-sharding/src/test/resources/lirs-trace-multi.conf index 1e9ee5afbc..9f4e9c8412 100644 --- a/akka-cluster-sharding/src/test/resources/lirs-trace-multi.conf +++ b/akka-cluster-sharding/src/test/resources/lirs-trace-multi.conf @@ -106,7 +106,7 @@ lirs-traces="lirs-traces" lirs-traces=${?LIRS_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -731,47 +731,47 @@ akka.cluster.sharding { } } - lru-fs-slru-100 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-100 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 100 } - lru-fs-slru-200 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-200 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 200 } - lru-fs-slru-400 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-400 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 400 } - lru-fs-slru-800 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-800 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 800 } - lru-fs-slru-1600 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-1600 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 1600 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { 
composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-100 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-100 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 100 } - lru-fs-slru-hc-200 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-200 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 200 } - lru-fs-slru-hc-400 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-400 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 400 } - lru-fs-slru-hc-800 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-800 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 800 } - lru-fs-slru-hc-1600 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-1600 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 1600 } diff --git a/akka-cluster-sharding/src/test/resources/lirs-trace-postgres.conf b/akka-cluster-sharding/src/test/resources/lirs-trace-postgres.conf index e3e75fea7d..edaf0a881f 100644 --- a/akka-cluster-sharding/src/test/resources/lirs-trace-postgres.conf +++ b/akka-cluster-sharding/src/test/resources/lirs-trace-postgres.conf @@ -88,7 +88,7 @@ lirs-traces="lirs-traces" lirs-traces=${?LIRS_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -595,39 +595,39 @@ akka.cluster.sharding { } } - lru-fs-slru-125 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-125 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 125 } - lru-fs-slru-250 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-250 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { 
composite.per-region-limit = 250 } - lru-fs-slru-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 500 } - lru-fs-slru-1000 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-1000 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 1000 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-125 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-125 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 125 } - lru-fs-slru-hc-250 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-250 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 250 } - lru-fs-slru-hc-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 500 } - lru-fs-slru-hc-1000 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-1000 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 1000 } diff --git a/akka-cluster-sharding/src/test/resources/lirs2-trace-w106.conf b/akka-cluster-sharding/src/test/resources/lirs2-trace-w106.conf index 2112145953..7a40c79905 100644 --- a/akka-cluster-sharding/src/test/resources/lirs2-trace-w106.conf +++ b/akka-cluster-sharding/src/test/resources/lirs2-trace-w106.conf @@ -91,7 +91,7 @@ lirs2-traces="lirs2-traces" lirs2-traces=${?LIRS2_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -598,39 +598,39 @@ 
akka.cluster.sharding { } } - lru-fs-slru-50 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-50 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 50 } - lru-fs-slru-100 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-100 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 100 } - lru-fs-slru-200 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-200 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 200 } - lru-fs-slru-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 500 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-50 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-50 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 50 } - lru-fs-slru-hc-100 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-100 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 100 } - lru-fs-slru-hc-200 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-200 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 200 } - lru-fs-slru-hc-500 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-500 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 500 } diff --git a/akka-cluster-sharding/src/test/resources/reference.conf b/akka-cluster-sharding/src/test/resources/reference.conf index 
9c35785593..d43bd1a978 100644 --- a/akka-cluster-sharding/src/test/resources/reference.conf +++ b/akka-cluster-sharding/src/test/resources/reference.conf @@ -1,4 +1,4 @@ -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [] diff --git a/akka-cluster-sharding/src/test/resources/synthetic-loop.conf b/akka-cluster-sharding/src/test/resources/synthetic-loop.conf index c21d6abebb..f316afcbba 100644 --- a/akka-cluster-sharding/src/test/resources/synthetic-loop.conf +++ b/akka-cluster-sharding/src/test/resources/synthetic-loop.conf @@ -16,7 +16,7 @@ # ╚═════════════════════╧═════════╧════════════╧═════════════╧══════════════╝ # -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -103,15 +103,15 @@ akka.cluster.sharding { } } - lru-fs-slru-50k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-50k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 50000 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-50k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-50k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 50000 } diff --git a/akka-cluster-sharding/src/test/resources/synthetic-zipfian.conf b/akka-cluster-sharding/src/test/resources/synthetic-zipfian.conf index 8db75db9ab..0f9bf39f00 100644 --- a/akka-cluster-sharding/src/test/resources/synthetic-zipfian.conf +++ b/akka-cluster-sharding/src/test/resources/synthetic-zipfian.conf @@ -76,7 +76,7 @@ # ╚════════════════════╧═════════╧════════════╧═════════════╧══════════════╝ # -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -301,16 +301,16 @@ akka.cluster.sharding { } } - lru-fs-slru-1k = 
${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-1k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 1000 } # Adaptive (hill climbing) Window-TinyLFU strategy - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-1k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-1k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 1000 } diff --git a/akka-cluster-sharding/src/test/resources/text-moby-dick.conf b/akka-cluster-sharding/src/test/resources/text-moby-dick.conf index 48365030fe..f7de9e300a 100644 --- a/akka-cluster-sharding/src/test/resources/text-moby-dick.conf +++ b/akka-cluster-sharding/src/test/resources/text-moby-dick.conf @@ -67,7 +67,7 @@ text-traces="text-traces" text-traces=${?TEXT_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ { @@ -455,31 +455,31 @@ akka.cluster.sharding { } } - lru-fs-slru-25 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-25 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 25 } - lru-fs-slru-50 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-50 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 50 } - lru-fs-slru-100 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-100 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 100 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-25 = 
${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-25 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 25 } - lru-fs-slru-hc-50 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-50 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 50 } - lru-fs-slru-hc-100 = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-100 = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 100 } diff --git a/akka-cluster-sharding/src/test/resources/wikipedia-trace-2018.conf b/akka-cluster-sharding/src/test/resources/wikipedia-trace-2018.conf index 6296c89d72..a42d929b2a 100644 --- a/akka-cluster-sharding/src/test/resources/wikipedia-trace-2018.conf +++ b/akka-cluster-sharding/src/test/resources/wikipedia-trace-2018.conf @@ -37,7 +37,7 @@ wiki-traces="wiki-traces" wiki-traces=${?WIKI_TRACES} -akka.cluster.sharding { +pekko.cluster.sharding { passivation.simulator { runs = [ # { @@ -176,15 +176,15 @@ akka.cluster.sharding { } } - lru-fs-slru-10k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-10k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.per-region-limit = 10000 } - lru-fs-slru-hc = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru} { + lru-fs-slru-hc = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru} { composite.admission.optimizer = hill-climbing } - lru-fs-slru-hc-10k = ${akka.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { + lru-fs-slru-hc-10k = ${pekko.cluster.sharding.passivation.simulator.lru-fs-slru-hc} { composite.per-region-limit = 10000 } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheckSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheckSpec.scala 
index 18956a2b08..34f930d53e 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheckSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingHealthCheckSpec.scala @@ -15,8 +15,8 @@ import scala.concurrent.duration._ object ClusterShardingHealthCheckSpec { val config = ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] """.stripMargin) } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingInternalsSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingInternalsSpec.scala index c5d5bbc21b..e631b50f29 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingInternalsSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingInternalsSpec.scala @@ -29,13 +29,13 @@ object ClusterShardingInternalsSpec { } class ClusterShardingInternalsSpec extends AkkaSpec(""" - |akka.actor.provider = cluster - |akka.remote.classic.netty.tcp.port = 0 - |akka.remote.artery.canonical.port = 0 - |akka.loglevel = DEBUG - |akka.cluster.sharding.verbose-debug-logging = on - |akka.cluster.sharding.fail-on-invalid-entity-state-transition = on - |akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + |pekko.actor.provider = cluster + |pekko.remote.classic.netty.tcp.port = 0 + |pekko.remote.artery.canonical.port = 0 + |pekko.loglevel = DEBUG + |pekko.cluster.sharding.verbose-debug-logging = on + |pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on + |pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] |""".stripMargin) with WithLogCapturing { import ClusterShardingInternalsSpec._ diff --git 
a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeaseSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeaseSpec.scala index fc13e73062..5e07d7e6b1 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeaseSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingLeaseSpec.scala @@ -19,12 +19,12 @@ import pekko.testkit.TestActors.EchoActor object ClusterShardingLeaseSpec { val config = ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.sharding { + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.sharding { use-lease = "test-lease" lease-retry-interval = 200ms distributed-data.durable { @@ -36,14 +36,14 @@ object ClusterShardingLeaseSpec { """).withFallback(TestLease.config) val persistenceConfig = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { state-store-mode = persistence - journal-plugin-id = "akka.persistence.journal.inmem" + journal-plugin-id = "pekko.persistence.journal.inmem" } """) val ddataConfig = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { state-store-mode = ddata } """) diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettingsSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettingsSpec.scala index 79f1af7128..999f174609 100644 --- 
a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettingsSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ClusterShardingSettingsSpec.scala @@ -36,7 +36,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow timeout for (default) idle passivation strategy to be configured (via config)" in { settings(""" #passivation-idle-timeout - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { default-idle-strategy.idle-entity.timeout = 3 minutes } #passivation-idle-timeout @@ -56,7 +56,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow timeout and interval for (default) idle passivation strategy to be configured (via config)" in { settings(""" - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { default-idle-strategy { idle-entity { timeout = 3 minutes @@ -81,7 +81,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow new default passivation strategy to be enabled (via config)" in { settings(""" #passivation-new-default-strategy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = default-strategy } #passivation-new-default-strategy @@ -107,7 +107,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow new default passivation strategy limit to be configured (via config)" in { settings(""" #passivation-new-default-strategy-configured - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = default-strategy default-strategy { active-entity-limit = 1000000 @@ -136,7 +136,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow new default passivation strategy with idle timeout to be configured (via config)" in { settings(""" #passivation-new-default-strategy-with-idle - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = 
default-strategy default-strategy { idle-entity.timeout = 30.minutes @@ -166,7 +166,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { settings(""" #custom-passivation-strategy #lru-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-lru-strategy custom-lru-strategy { active-entity-limit = 1000000 @@ -196,7 +196,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow segmented least recently used passivation strategy to be configured (via config)" in { settings(""" #slru-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-slru-strategy custom-slru-strategy { active-entity-limit = 1000000 @@ -221,7 +221,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow 4-level segmented least recently used passivation strategy to be configured (via config)" in { settings(""" #s4lru-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-s4lru-strategy custom-s4lru-strategy { active-entity-limit = 1000000 @@ -254,7 +254,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow least recently used passivation strategy with idle timeout to be configured (via config)" in { settings(""" - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-lru-with-idle custom-lru-with-idle { active-entity-limit = 1000000 @@ -284,7 +284,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow most recently used passivation strategy to be configured (via config)" in { settings(""" #mru-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-mru-strategy custom-mru-strategy { active-entity-limit = 1000000 @@ -310,7 +310,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow most recently used passivation strategy with idle 
timeout to be configured (via config)" in { settings(""" - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-mru-with-idle custom-mru-with-idle { active-entity-limit = 1000000 @@ -338,7 +338,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow least frequently used passivation strategy to be configured (via config)" in { settings(""" #lfu-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-lfu-strategy custom-lfu-strategy { active-entity-limit = 1000000 @@ -366,7 +366,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow least frequently used passivation strategy with idle timeout to be configured (via config)" in { settings(""" - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-lfu-with-idle custom-lfu-with-idle { active-entity-limit = 1000000 @@ -396,7 +396,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow least frequently used passivation strategy with dynamic aging to be configured (via config)" in { settings(""" #lfuda-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-lfu-with-dynamic-aging custom-lfu-with-dynamic-aging { active-entity-limit = 1000 @@ -432,7 +432,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow passivation strategy admission window policy to be configured (via config)" in { settings(""" #admission-window-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-strategy-with-admission-window custom-strategy-with-admission-window { active-entity-limit = 1000000 @@ -458,7 +458,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow passivation strategy admission window proportion to be configured (via config)" in { settings(""" #admission-window-proportion - 
akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-strategy-with-admission-window custom-strategy-with-admission-window { active-entity-limit = 1000000 @@ -487,7 +487,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow passivation strategy admission window optimizer to be configured (via config)" in { settings(""" #admission-window-optimizer - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-strategy-with-admission-window custom-strategy-with-admission-window { active-entity-limit = 1000000 @@ -520,7 +520,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow passivation strategy admission to be configured (via config)" in { settings(""" #admission-policy - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-strategy-with-admission custom-strategy-with-admission { active-entity-limit = 1000000 @@ -564,7 +564,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow passivation strategy admission parameters to be tuned (via config)" in { settings(""" - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-strategy-with-admission custom-strategy-with-admission { active-entity-limit = 1000000 @@ -702,7 +702,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "allow passivation strategy with admission and idle timeout to be configured (via config)" in { settings(""" - akka.cluster.sharding.passivation { + pekko.cluster.sharding.passivation { strategy = custom-strategy-with-admission custom-strategy-with-admission { active-entity-limit = 1000000 @@ -781,7 +781,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "disable automatic passivation if `remember-entities` is enabled (via config)" in { settings(""" - akka.cluster.sharding.remember-entities = on + 
pekko.cluster.sharding.remember-entities = on """).passivationStrategy shouldBe ClusterShardingSettings.NoPassivationStrategy } @@ -793,7 +793,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "disable automatic passivation if idle timeout is set to zero (via config)" in { settings(""" - akka.cluster.sharding.passivation.default-idle-strategy.idle-entity.timeout = 0 + pekko.cluster.sharding.passivation.default-idle-strategy.idle-entity.timeout = 0 """).passivationStrategy shouldBe ClusterShardingSettings.NoPassivationStrategy } @@ -812,7 +812,7 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "support old `passivate-idle-entity-after` setting (overriding new strategy settings)" in { settings(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivate-idle-entity-after = 5 minutes passivation.strategy = default-strategy } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ConcurrentStartupShardingSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ConcurrentStartupShardingSpec.scala index c56711a2da..8bd689793c 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ConcurrentStartupShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ConcurrentStartupShardingSpec.scala @@ -20,15 +20,15 @@ object ConcurrentStartupShardingSpec { val config = """ - akka.actor.provider = "cluster" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.log-dead-letters = off - akka.log-dead-letters-during-shutdown = off - akka.cluster.sharding.verbose-debug-logging = on - akka.actor { + pekko.actor.provider = "cluster" + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.remote.classic.netty.tcp.port = 0 + 
pekko.remote.artery.canonical.port = 0 + pekko.log-dead-letters = off + pekko.log-dead-letters-during-shutdown = off + pekko.cluster.sharding.verbose-debug-logging = on + pekko.actor { default-dispatcher { executor = "fork-join-executor" fork-join-executor { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/CoordinatedShutdownShardingSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/CoordinatedShutdownShardingSpec.scala index a52752d202..d4edeb8fab 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/CoordinatedShutdownShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/CoordinatedShutdownShardingSpec.scala @@ -23,12 +23,12 @@ import pekko.util.ccompat._ object CoordinatedShutdownShardingSpec { val config = """ - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.sharding.verbose-debug-logging = on + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.sharding.verbose-debug-logging = on """ val extractEntityId: ShardRegion.ExtractEntityId = { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/EntityTerminationSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/EntityTerminationSpec.scala index 260270ff31..d51bfe1fbf 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/EntityTerminationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/EntityTerminationSpec.scala @@ -25,17 +25,17 @@ object EntityTerminationSpec { final case class 
EntityEnvelope(id: String, msg: Any) def config = ConfigFactory.parseString(""" - akka.loglevel=DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = cluster - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding.state-store-mode = ddata + pekko.loglevel=DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = cluster + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding.state-store-mode = ddata # no leaks between test runs thank you - akka.cluster.sharding.distributed-data.durable.keys = [] - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on - akka.cluster.sharding.entity-restart-backoff = 250ms + pekko.cluster.sharding.distributed-data.durable.keys = [] + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.cluster.sharding.entity-restart-backoff = 250ms """.stripMargin) object StoppingActor { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/GetShardTypeNamesSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/GetShardTypeNamesSpec.scala index 8fba090d8d..3f4333f925 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/GetShardTypeNamesSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/GetShardTypeNamesSpec.scala @@ -14,12 +14,12 @@ import pekko.testkit.WithLogCapturing object GetShardTypeNamesSpec { val config = """ - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on 
+ pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """ val extractEntityId: ShardRegion.ExtractEntityId = { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala index 560b339e38..16241b7af0 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala @@ -27,23 +27,23 @@ class JoinConfigCompatCheckShardingSpec extends AkkaSpec() with WithLogCapturing val baseConfig: Config = ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.coordinated-shutdown.terminate-actor-system = on - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.sharding.verbose-debug-logging = on + pekko.actor.provider = "cluster" + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.coordinated-shutdown.terminate-actor-system = on + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.sharding.verbose-debug-logging = on """) "A Joining Node" must { /** This test verifies the built-in JoinConfigCompatCheckerSharding */ - "NOT be allowed to join a cluster using a different value for akka.cluster.sharding.state-store-mode" taggedAs LongRunningTest in { + "NOT be allowed to join a cluster using a different value for pekko.cluster.sharding.state-store-mode" 
taggedAs LongRunningTest in { val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # use 'persistence' for state store sharding.state-store-mode = "persistence" diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala index 730a306e47..1a6bc4daaa 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala @@ -19,7 +19,7 @@ import pekko.testkit.AkkaSpec import scala.collection.immutable.SortedSet -class LeastShardAllocationStrategyRandomizedSpec extends AkkaSpec("akka.loglevel = INFO") { +class LeastShardAllocationStrategyRandomizedSpec extends AkkaSpec("pekko.loglevel = INFO") { import LeastShardAllocationStrategySpec.{ afterRebalance, countShards, diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/PersistentShardingMigrationSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/PersistentShardingMigrationSpec.scala index cac16172d0..cea55ac196 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/PersistentShardingMigrationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/PersistentShardingMigrationSpec.scala @@ -23,11 +23,11 @@ import scala.concurrent.duration._ */ object PersistentShardingMigrationSpec { val config = ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding { + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.artery.canonical.port = 0 + 
pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding { remember-entities = on remember-entities-store = "eventsourced" @@ -45,12 +45,12 @@ object PersistentShardingMigrationSpec { retry-interval = 500ms } - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/PersistentShardingMigrationSpec-${UUID + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/PersistentShardingMigrationSpec-${UUID .randomUUID() .toString}" - akka.persistence.journal.leveldb { + pekko.persistence.journal.leveldb { native = off dir = "target/journal-PersistentShardingMigrationSpec-${UUID.randomUUID()}" } @@ -58,13 +58,13 @@ object PersistentShardingMigrationSpec { val configForNewMode = ConfigFactory .parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { remember-entities = on remember-entities-store = "eventsourced" state-store-mode = "ddata" } - akka.persistence.journal.leveldb { + pekko.persistence.journal.leveldb { event-adapters { coordinator-migration = "org.apache.pekko.cluster.sharding.OldCoordinatorStateMigrationEventAdapter" } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ProxyShardingSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ProxyShardingSpec.scala index 812153a501..4f39180bd2 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ProxyShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ProxyShardingSpec.scala @@ -16,13 +16,13 @@ import pekko.testkit.WithLogCapturing object ProxyShardingSpec { val config = """ - akka.actor.provider = cluster - akka.loglevel = DEBUG - akka.loggers = 
["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.actor.provider = cluster + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """ } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesAndStartEntitySpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesAndStartEntitySpec.scala index 2076d61a35..abf4970682 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesAndStartEntitySpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesAndStartEntitySpec.scala @@ -44,15 +44,15 @@ object RememberEntitiesAndStartEntitySpec { } val config = ConfigFactory.parseString(""" - akka.loglevel=DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = cluster - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.loglevel=DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = cluster + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on # no leaks between test runs thank you - akka.cluster.sharding.distributed-data.durable.keys = [] + 
pekko.cluster.sharding.distributed-data.durable.keys = [] """.stripMargin) } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesBatchedUpdatesSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesBatchedUpdatesSpec.scala index f85285734d..65dd431d94 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesBatchedUpdatesSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesBatchedUpdatesSpec.scala @@ -41,17 +41,17 @@ object RememberEntitiesBatchedUpdatesSpec { } def config = ConfigFactory.parseString(""" - akka.loglevel=DEBUG - # akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = cluster - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding.state-store-mode = ddata - akka.cluster.sharding.remember-entities = on + pekko.loglevel=DEBUG + # pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = cluster + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding.state-store-mode = ddata + pekko.cluster.sharding.remember-entities = on # no leaks between test runs thank you - akka.cluster.sharding.distributed-data.durable.keys = [] - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.cluster.sharding.distributed-data.durable.keys = [] + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """.stripMargin) } class RememberEntitiesBatchedUpdatesSpec diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesFailureSpec.scala 
b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesFailureSpec.scala index 3aa185c82b..0c5528e395 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesFailureSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesFailureSpec.scala @@ -27,24 +27,24 @@ import org.apache.pekko.cluster.sharding.ShardCoordinator.ShardAllocationStrateg object RememberEntitiesFailureSpec { val config = ConfigFactory.parseString(s""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = cluster - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding.distributed-data.durable.keys = [] + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = cluster + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding.distributed-data.durable.keys = [] # must be ddata or else remember entities store is ignored - akka.cluster.sharding.state-store-mode = ddata - akka.cluster.sharding.remember-entities = on - akka.cluster.sharding.remember-entities-store = custom - akka.cluster.sharding.remember-entities-custom-store = "org.apache.pekko.cluster.sharding.RememberEntitiesFailureSpec$$FakeStore" + pekko.cluster.sharding.state-store-mode = ddata + pekko.cluster.sharding.remember-entities = on + pekko.cluster.sharding.remember-entities-store = custom + pekko.cluster.sharding.remember-entities-custom-store = "org.apache.pekko.cluster.sharding.RememberEntitiesFailureSpec$$FakeStore" # quick backoffs - akka.cluster.sharding.entity-restart-backoff = 1s - akka.cluster.sharding.shard-failure-backoff = 1s - akka.cluster.sharding.coordinator-failure-backoff = 1s - akka.cluster.sharding.updating-state-timeout = 1s - 
akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.cluster.sharding.entity-restart-backoff = 1s + pekko.cluster.sharding.shard-failure-backoff = 1s + pekko.cluster.sharding.coordinator-failure-backoff = 1s + pekko.cluster.sharding.updating-state-timeout = 1s + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """) class EntityActor extends Actor with ActorLogging { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala index 8861f64202..89d30bf194 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala @@ -27,22 +27,22 @@ import scala.concurrent.duration._ */ object RememberEntitiesShardIdExtractorChangeSpec { val config = ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding { + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding { remember-entities = on remember-entities-store = "eventsourced" state-store-mode = "ddata" } - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/RememberEntitiesShardIdExtractorChangeSpec-${UUID + 
pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/RememberEntitiesShardIdExtractorChangeSpec-${UUID .randomUUID() .toString}" - akka.persistence.journal.leveldb { + pekko.persistence.journal.leveldb { native = off dir = "target/journal-PersistentShardingMigrationSpec-${UUID.randomUUID()}" } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala index a804a4b9f3..6c484cdd7a 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala @@ -30,22 +30,22 @@ import pekko.testkit.WithLogCapturing object RemoveInternalClusterShardingDataSpec { val config = """ - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" - akka.persistence.journal.leveldb { + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" + pekko.persistence.journal.leveldb { native = off dir = "target/journal-RemoveInternalClusterShardingDataSpec" } - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - 
akka.persistence.snapshot-store.local.dir = "target/snapshots-RemoveInternalClusterShardingDataSpec" - akka.cluster.sharding.snapshot-after = 5 - akka.cluster.sharding.state-store-mode = persistence - akka.cluster.sharding.keep-nr-of-batches = 0 - akka.cluster.sharding.verbose-debug-logging = on + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/snapshots-RemoveInternalClusterShardingDataSpec" + pekko.cluster.sharding.snapshot-after = 5 + pekko.cluster.sharding.state-store-mode = persistence + pekko.cluster.sharding.keep-nr-of-batches = 0 + pekko.cluster.sharding.verbose-debug-logging = on """ val extractEntityId: ShardRegion.ExtractEntityId = { @@ -105,7 +105,7 @@ class RemoveInternalClusterShardingDataSpec import RemoveInternalClusterShardingDataSpec._ val storageLocations = - List("akka.persistence.journal.leveldb.dir", "akka.persistence.snapshot-store.local.dir").map(s => + List("pekko.persistence.journal.leveldb.dir", "pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) override protected def atStartup(): Unit = { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardRegionSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardRegionSpec.scala index 7a6b1792d8..8465db2d0f 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardRegionSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardRegionSpec.scala @@ -19,27 +19,27 @@ object ShardRegionSpec { val host = "127.0.0.1" val tempConfig = s""" - akka.remote.classic.netty.tcp.hostname = "$host" - akka.remote.artery.canonical.hostname = "$host" + pekko.remote.classic.netty.tcp.hostname = "$host" + pekko.remote.artery.canonical.hostname = "$host" """ val config = 
ConfigFactory.parseString(tempConfig).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.log-remote-lifecycle-events = off - akka.test.single-expect-default = 5 s - akka.cluster.sharding.distributed-data.durable.lmdb { + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.log-remote-lifecycle-events = off + pekko.test.single-expect-default = 5 s + pekko.cluster.sharding.distributed-data.durable.lmdb { dir = "target/ShardRegionSpec/sharding-ddata" map-size = 10 MiB } - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.jmx.enabled = off - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.jmx.enabled = off + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """)) val shardTypeName = "Caat" @@ -70,7 +70,8 @@ class ShardRegionSpec extends AkkaSpec(ShardRegionSpec.config) with WithLogCaptu import ShardRegionSpec._ val storageLocation = List( - new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + new File( + system.settings.config.getString("pekko.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) // mute logging of deadLetters system.eventStream.publish(Mute(DeadLettersFilter[Any])) diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardWithLeaseSpec.scala 
b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardWithLeaseSpec.scala index e676674813..3a08b0b804 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardWithLeaseSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/ShardWithLeaseSpec.scala @@ -26,19 +26,19 @@ import scala.util.control.NoStackTrace object ShardWithLeaseSpec { val config = """ - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 test-lease { lease-class = org.apache.pekko.coordination.lease.TestLease heartbeat-interval = 1s heartbeat-timeout = 120s lease-operation-timeout = 3s } - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """ class EntityActor extends Actor with ActorLogging { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/StartEntitySpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/StartEntitySpec.scala index 2961533373..a7df4e6126 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/StartEntitySpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/StartEntitySpec.scala @@ -24,17 +24,17 @@ object StartEntitySpec { final case class EntityEnvelope(id: String, msg: Any) def config = ConfigFactory.parseString(""" - akka.loglevel=DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - 
akka.actor.provider = cluster - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding.state-store-mode = ddata - akka.cluster.sharding.remember-entities = on + pekko.loglevel=DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = cluster + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding.state-store-mode = ddata + pekko.cluster.sharding.remember-entities = on # no leaks between test runs thank you - akka.cluster.sharding.distributed-data.durable.keys = [] - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.cluster.sharding.distributed-data.durable.keys = [] + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """.stripMargin) object EntityActor { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/SupervisionSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/SupervisionSpec.scala index 461cd2c934..fc9b348b6c 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/SupervisionSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/SupervisionSpec.scala @@ -18,13 +18,13 @@ import pekko.testkit.{ AkkaSpec, ImplicitSender } object SupervisionSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.loglevel = DEBUG - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.actor.provider = "cluster" + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.loggers = 
["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.loglevel = DEBUG + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """) case class Msg(id: Long, msg: Any) diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/external/ExternalShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/external/ExternalShardAllocationStrategySpec.scala index f266a6d204..4fe2702c11 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/external/ExternalShardAllocationStrategySpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/external/ExternalShardAllocationStrategySpec.scala @@ -15,9 +15,9 @@ import pekko.testkit.TestProbe import pekko.util.Timeout class ExternalShardAllocationStrategySpec extends AkkaSpec(""" - akka.actor.provider = cluster - akka.loglevel = INFO - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = cluster + pekko.loglevel = INFO + pekko.remote.artery.canonical.port = 0 """) { val requester = TestProbe() diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala index 3911055cf3..9717575498 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala @@ -22,19 +22,19 @@ import org.scalatest.wordspec.AnyWordSpecLike object RememberEntitiesShardStoreSpec { def config = ConfigFactory.parseString(s""" - akka.loglevel=DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = cluster - 
akka.remote.artery.canonical.port = 0 - akka.remote.classic.netty.tcp.port = 0 - akka.cluster.sharding.state-store-mode = ddata - akka.cluster.sharding.snapshot-after = 2 - akka.cluster.sharding.remember-entities = on + pekko.loglevel=DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = cluster + pekko.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.cluster.sharding.state-store-mode = ddata + pekko.cluster.sharding.snapshot-after = 2 + pekko.cluster.sharding.remember-entities = on # no leaks between test runs thank you - akka.cluster.sharding.distributed-data.durable.keys = [] - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/${classOf[RememberEntitiesShardStoreSpec].getName}-${UUID + pekko.cluster.sharding.distributed-data.durable.keys = [] + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/${classOf[RememberEntitiesShardStoreSpec].getName}-${UUID .randomUUID() .toString}" """.stripMargin) diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesStarterSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesStarterSpec.scala index f23be1d8b6..885fce37ce 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesStarterSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/internal/RememberEntitiesStarterSpec.scala @@ -61,7 +61,7 @@ class RememberEntitiesStarterSpec extends AkkaSpec { """ retry-interval = 1 second """) - 
.withFallback(system.settings.config.getConfig("akka.cluster.sharding"))) + .withFallback(system.settings.config.getConfig("pekko.cluster.sharding"))) val rememberEntityStarter = system.actorOf( RememberEntityStarter.props(regionProbe.ref, shardProbe.ref, shardId, Set("1", "2", "3"), customSettings)) @@ -94,7 +94,7 @@ class RememberEntitiesStarterSpec extends AkkaSpec { """ retry-interval = 1 second """) - .withFallback(system.settings.config.getConfig("akka.cluster.sharding"))) + .withFallback(system.settings.config.getConfig("pekko.cluster.sharding"))) val rememberEntityStarter = system.actorOf( RememberEntityStarter.props(regionProbe.ref, shardProbe.ref, shardId, Set("1", "2", "3"), customSettings)) @@ -130,7 +130,7 @@ class RememberEntitiesStarterSpec extends AkkaSpec { } retry-interval = 1 second """) - .withFallback(system.settings.config.getConfig("akka.cluster.sharding"))) + .withFallback(system.settings.config.getConfig("pekko.cluster.sharding"))) val rememberEntityStarter = system.actorOf( RememberEntityStarter diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/CompositeSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/CompositeSpec.scala index 74ff6ce6af..e16a9fd1c6 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/CompositeSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/CompositeSpec.scala @@ -13,7 +13,7 @@ import scala.concurrent.duration._ object CompositeSpec { val admissionWindowAndFilterConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lru-fs-slru lru-fs-slru { @@ -41,7 +41,7 @@ object CompositeSpec { """).withFallback(EntityPassivationSpec.config) val admissionFilterNoWindowConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { 
strategy = fs-lru fs-lru { @@ -57,7 +57,7 @@ object CompositeSpec { """).withFallback(EntityPassivationSpec.config) val adaptiveWindowConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lru-fs-lru-hc lru-fs-lru-hc { @@ -88,7 +88,7 @@ object CompositeSpec { """).withFallback(EntityPassivationSpec.config) val idleConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = default-strategy default-strategy { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/EntityPassivationSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/EntityPassivationSpec.scala index 5048b6d669..5614a46c76 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/EntityPassivationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/EntityPassivationSpec.scala @@ -24,17 +24,17 @@ import scala.concurrent.duration._ object EntityPassivationSpec { val config: Config = ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.sharding.verbose-debug-logging = on - akka.cluster.sharding.fail-on-invalid-entity-state-transition = on + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.sharding.verbose-debug-logging = on + pekko.cluster.sharding.fail-on-invalid-entity-state-transition = on """) val disabledConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { 
strategy = none } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/IdleSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/IdleSpec.scala index a92f547b30..ef3ed0e44d 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/IdleSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/IdleSpec.scala @@ -11,7 +11,7 @@ import scala.concurrent.duration._ object IdleSpec { val config: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { default-idle-strategy.idle-entity.timeout = 1s } diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala index d64ca0e2db..dfe97fbafc 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala @@ -13,7 +13,7 @@ import scala.concurrent.duration._ object LeastFrequentlyUsedSpec { val config: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lfu lfu { @@ -25,7 +25,7 @@ object LeastFrequentlyUsedSpec { """).withFallback(EntityPassivationSpec.config) val dynamicAgingConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lfuda lfuda { @@ -42,7 +42,7 @@ object LeastFrequentlyUsedSpec { """).withFallback(EntityPassivationSpec.config) val idleConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lfu-idle lfu-idle { diff --git 
a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala index 252047bfae..ca122d1d42 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala @@ -13,7 +13,7 @@ import scala.concurrent.duration._ object LeastRecentlyUsedSpec { val config: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lru lru { @@ -25,7 +25,7 @@ object LeastRecentlyUsedSpec { """).withFallback(EntityPassivationSpec.config) val segmentedConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = slru slru { @@ -46,11 +46,11 @@ object LeastRecentlyUsedSpec { val segmentedInitialLimitConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding.passivation.slru.active-entity-limit = 20 + pekko.cluster.sharding.passivation.slru.active-entity-limit = 20 """).withFallback(segmentedConfig) val idleConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = lru-idle lru-idle { diff --git a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/MostRecentlyUsedSpec.scala b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/MostRecentlyUsedSpec.scala index 728486aa06..32508ee144 100644 --- a/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/MostRecentlyUsedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/org/apache/pekko/cluster/sharding/passivation/MostRecentlyUsedSpec.scala @@ -13,7 +13,7 @@ import scala.concurrent.duration._ object MostRecentlyUsedSpec { val 
config: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = mru mru { @@ -25,7 +25,7 @@ object MostRecentlyUsedSpec { """).withFallback(EntityPassivationSpec.config) val idleConfig: Config = ConfigFactory.parseString(""" - akka.cluster.sharding { + pekko.cluster.sharding { passivation { strategy = mru-idle mru-idle { diff --git a/akka-cluster-tools/src/main/resources/reference.conf b/akka-cluster-tools/src/main/resources/reference.conf index aad17ed753..f3c5b1e464 100644 --- a/akka-cluster-tools/src/main/resources/reference.conf +++ b/akka-cluster-tools/src/main/resources/reference.conf @@ -1,5 +1,5 @@ ############################################ -# Akka Cluster Tools Reference Config File # +# Pekko Cluster Tools Reference Config File # ############################################ # This is the reference config file that contains all the default settings. @@ -7,7 +7,7 @@ # //#pub-sub-ext-config # Settings for the DistributedPubSub extension -akka.cluster.pub-sub { +pekko.cluster.pub-sub { # Actor name of the mediator actor, /system/distributedPubSubMediator name = distributedPubSubMediator @@ -34,18 +34,18 @@ akka.cluster.pub-sub { # The id of the dispatcher to use for DistributedPubSubMediator actors. # If specified you need to define the settings of the actual dispatcher. 
- use-dispatcher = "akka.actor.internal-dispatcher" + use-dispatcher = "pekko.actor.internal-dispatcher" } # //#pub-sub-ext-config # Protobuf serializer for cluster DistributedPubSubMeditor messages -akka.actor { +pekko.actor { serializers { - akka-pubsub = "org.apache.pekko.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer" + pekko-pubsub = "org.apache.pekko.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer" } serialization-bindings { - "org.apache.pekko.cluster.pubsub.DistributedPubSubMessage" = akka-pubsub - "org.apache.pekko.cluster.pubsub.DistributedPubSubMediator$Internal$SendToOneSubscriber" = akka-pubsub + "org.apache.pekko.cluster.pubsub.DistributedPubSubMessage" = pekko-pubsub + "org.apache.pekko.cluster.pubsub.DistributedPubSubMediator$Internal$SendToOneSubscriber" = pekko-pubsub } serialization-identifiers { "org.apache.pekko.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer" = 9 @@ -55,7 +55,7 @@ akka.actor { # //#receptionist-ext-config # Settings for the ClusterClientReceptionist extension -akka.cluster.client.receptionist { +pekko.cluster.client.receptionist { # Actor name of the ClusterReceptionist actor, /system/receptionist name = receptionist @@ -72,7 +72,7 @@ akka.cluster.client.receptionist { # The id of the dispatcher to use for ClusterReceptionist actors. # If specified you need to define the settings of the actual dispatcher. - use-dispatcher = "akka.actor.internal-dispatcher" + use-dispatcher = "pekko.actor.internal-dispatcher" # How often failure detection heartbeat messages should be received for # each ClusterClient @@ -93,7 +93,7 @@ akka.cluster.client.receptionist { # //#cluster-client-config # Settings for the ClusterClient -akka.cluster.client { +pekko.cluster.client { # Actor paths of the ClusterReceptionist actors on the servers (cluster nodes) # that the client will try to contact initially. It is mandatory to specify # at least one initial contact. 
@@ -140,12 +140,12 @@ akka.cluster.client { # //#cluster-client-config # Protobuf serializer for ClusterClient messages -akka.actor { +pekko.actor { serializers { - akka-cluster-client = "org.apache.pekko.cluster.client.protobuf.ClusterClientMessageSerializer" + pekko-cluster-client = "org.apache.pekko.cluster.client.protobuf.ClusterClientMessageSerializer" } serialization-bindings { - "org.apache.pekko.cluster.client.ClusterClientMessage" = akka-cluster-client + "org.apache.pekko.cluster.client.ClusterClientMessage" = pekko-cluster-client } serialization-identifiers { "org.apache.pekko.cluster.client.protobuf.ClusterClientMessageSerializer" = 15 @@ -153,7 +153,7 @@ akka.actor { } # //#singleton-config -akka.cluster.singleton { +pekko.cluster.singleton { # The actor name of the child singleton actor. singleton-name = "singleton" @@ -164,11 +164,11 @@ akka.cluster.singleton { # When a node is becoming oldest it sends hand-over request to previous oldest, # that might be leaving the cluster. This is retried with this interval until # the previous oldest confirms that the hand over has started or the previous - # oldest member is removed from the cluster (+ akka.cluster.down-removal-margin). + # oldest member is removed from the cluster (+ pekko.cluster.down-removal-margin). hand-over-retry-interval = 1s # The number of retries are derived from hand-over-retry-interval and - # akka.cluster.down-removal-margin (or ClusterSingletonManagerSettings.removalMargin), + # pekko.cluster.down-removal-margin (or ClusterSingletonManagerSettings.removalMargin), # but it will never be less than this property. 
# After the hand over retries and it's still not able to exchange the hand over messages # with the previous oldest it will restart itself by throwing ClusterSingletonManagerIsStuck, @@ -193,9 +193,9 @@ akka.cluster.singleton { # //#singleton-config # //#singleton-proxy-config -akka.cluster.singleton-proxy { +pekko.cluster.singleton-proxy { # The actor name of the singleton actor that is started by the ClusterSingletonManager - singleton-name = ${akka.cluster.singleton.singleton-name} + singleton-name = ${pekko.cluster.singleton.singleton-name} # The role of the cluster nodes where the singleton can be deployed. # Corresponding to the role used by the `ClusterSingletonManager`. If the role is not @@ -218,12 +218,12 @@ akka.cluster.singleton-proxy { # //#singleton-proxy-config # Serializer for cluster ClusterSingleton messages -akka.actor { +pekko.actor { serializers { - akka-singleton = "org.apache.pekko.cluster.singleton.protobuf.ClusterSingletonMessageSerializer" + pekko-singleton = "org.apache.pekko.cluster.singleton.protobuf.ClusterSingletonMessageSerializer" } serialization-bindings { - "org.apache.pekko.cluster.singleton.ClusterSingletonMessage" = akka-singleton + "org.apache.pekko.cluster.singleton.ClusterSingletonMessage" = pekko-singleton } serialization-identifiers { "org.apache.pekko.cluster.singleton.protobuf.ClusterSingletonMessageSerializer" = 14 diff --git a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/client/ClusterClient.scala index 0ffeeb8364..a44328824e 100644 --- a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/client/ClusterClient.scala +++ b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/client/ClusterClient.scala @@ -53,14 +53,14 @@ object ClusterClientSettings { /** * Create settings from the default configuration - * `akka.cluster.client`. + * `pekko.cluster.client`. 
*/ def apply(system: ActorSystem): ClusterClientSettings = - apply(system.settings.config.getConfig("akka.cluster.client")) + apply(system.settings.config.getConfig("pekko.cluster.client")) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.client`. + * the default configuration `pekko.cluster.client`. */ def apply(config: Config): ClusterClientSettings = { val initialContacts = immutableSeq(config.getStringList("initial-contacts")).map(ActorPath.fromString).toSet @@ -79,13 +79,13 @@ object ClusterClientSettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.client`. + * `pekko.cluster.client`. */ def create(system: ActorSystem): ClusterClientSettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.client`. + * the default configuration `pekko.cluster.client`. */ def create(config: Config): ClusterClientSettings = apply(config) @@ -570,7 +570,7 @@ object ClusterClientReceptionist extends ExtensionId[ClusterClientReceptionist] /** * Extension that starts [[ClusterReceptionist]] and accompanying [[pekko.cluster.pubsub.DistributedPubSubMediator]] - * with settings defined in config section `akka.cluster.client.receptionist`. + * with settings defined in config section `pekko.cluster.client.receptionist`. * The [[pekko.cluster.pubsub.DistributedPubSubMediator]] is started by the [[pekko.cluster.pubsub.DistributedPubSub]] extension. 
*/ @deprecated( @@ -578,7 +578,7 @@ object ClusterClientReceptionist extends ExtensionId[ClusterClientReceptionist] since = "2.6.0") final class ClusterClientReceptionist(system: ExtendedActorSystem) extends Extension { - private val config = system.settings.config.getConfig("akka.cluster.client.receptionist") + private val config = system.settings.config.getConfig("pekko.cluster.client.receptionist") private val role: Option[String] = config.getString("role") match { case "" => None case r => Some(r) @@ -658,14 +658,14 @@ object ClusterReceptionistSettings { /** * Create settings from the default configuration - * `akka.cluster.client.receptionist`. + * `pekko.cluster.client.receptionist`. */ def apply(system: ActorSystem): ClusterReceptionistSettings = - apply(system.settings.config.getConfig("akka.cluster.client.receptionist")) + apply(system.settings.config.getConfig("pekko.cluster.client.receptionist")) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.client.receptionist`. + * the default configuration `pekko.cluster.client.receptionist`. */ def apply(config: Config): ClusterReceptionistSettings = new ClusterReceptionistSettings( @@ -678,13 +678,13 @@ object ClusterReceptionistSettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.client.receptionist`. + * `pekko.cluster.client.receptionist`. */ def create(system: ActorSystem): ClusterReceptionistSettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.client.receptionist`. + * the default configuration `pekko.cluster.client.receptionist`. 
*/ def create(config: Config): ClusterReceptionistSettings = apply(config) diff --git a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediator.scala index e0da1c37f9..9eda764ccc 100644 --- a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediator.scala +++ b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediator.scala @@ -36,14 +36,14 @@ object DistributedPubSubSettings { /** * Create settings from the default configuration - * `akka.cluster.pub-sub`. + * `pekko.cluster.pub-sub`. */ def apply(system: ActorSystem): DistributedPubSubSettings = - apply(system.settings.config.getConfig("akka.cluster.pub-sub")) + apply(system.settings.config.getConfig("pekko.cluster.pub-sub")) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.pub-sub`. + * the default configuration `pekko.cluster.pub-sub`. */ def apply(config: Config): DistributedPubSubSettings = new DistributedPubSubSettings( @@ -64,13 +64,13 @@ object DistributedPubSubSettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.pub-sub`. + * `pekko.cluster.pub-sub`. */ def create(system: ActorSystem): DistributedPubSubSettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.pub-sub`. + * the default configuration `pekko.cluster.pub-sub`. */ def create(config: Config): DistributedPubSubSettings = apply(config) @@ -935,7 +935,7 @@ object DistributedPubSub extends ExtensionId[DistributedPubSub] with ExtensionId /** * Extension that starts a [[DistributedPubSubMediator]] actor - * with settings defined in config section `akka.cluster.pub-sub`. + * with settings defined in config section `pekko.cluster.pub-sub`. 
*/ class DistributedPubSub(system: ExtendedActorSystem) extends Extension { @@ -955,8 +955,8 @@ class DistributedPubSub(system: ExtendedActorSystem) extends Extension { if (isTerminated) system.deadLetters else { - val name = system.settings.config.getString("akka.cluster.pub-sub.name") - val dispatcher = system.settings.config.getString("akka.cluster.pub-sub.use-dispatcher") + val name = system.settings.config.getString("pekko.cluster.pub-sub.name") + val dispatcher = system.settings.config.getString("pekko.cluster.pub-sub.use-dispatcher") system.systemActorOf(DistributedPubSubMediator.props(settings).withDispatcher(dispatcher), name) } } diff --git a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManager.scala index 6d17676590..a5c4004054 100644 --- a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManager.scala @@ -45,17 +45,17 @@ object ClusterSingletonManagerSettings { /** * Create settings from the default configuration - * `akka.cluster.singleton`. + * `pekko.cluster.singleton`. */ def apply(system: ActorSystem): ClusterSingletonManagerSettings = - apply(system.settings.config.getConfig("akka.cluster.singleton")) + apply(system.settings.config.getConfig("pekko.cluster.singleton")) // note that this setting has some additional logic inside the ClusterSingletonManager // falling back to DowningProvider.downRemovalMargin if it is off/Zero .withRemovalMargin(Cluster(system).settings.DownRemovalMargin) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.singleton`. + * the default configuration `pekko.cluster.singleton`. 
*/ def apply(config: Config): ClusterSingletonManagerSettings = { val lease = config.getString("use-lease") match { @@ -73,13 +73,13 @@ object ClusterSingletonManagerSettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.singleton`. + * `pekko.cluster.singleton`. */ def create(system: ActorSystem): ClusterSingletonManagerSettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.singleton`. + * the default configuration `pekko.cluster.singleton`. */ def create(config: Config): ClusterSingletonManagerSettings = apply(config) @@ -512,7 +512,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se val (maxHandOverRetries, maxTakeOverRetries) = { val n = (removalMargin.toMillis / handOverRetryInterval.toMillis).toInt - val minRetries = context.system.settings.config.getInt("akka.cluster.singleton.min-number-of-hand-over-retries") + val minRetries = context.system.settings.config.getInt("pekko.cluster.singleton.min-number-of-hand-over-retries") require(minRetries >= 1, "min-number-of-hand-over-retries must be >= 1") val handOverRetries = math.max(minRetries, n + 3) val takeOverRetries = math.max(1, handOverRetries - 3) diff --git a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxy.scala index 764bd6259f..33397fce47 100644 --- a/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxy.scala @@ -29,14 +29,14 @@ object ClusterSingletonProxySettings { /** * Create settings from the default configuration - * `akka.cluster.singleton-proxy`. + * `pekko.cluster.singleton-proxy`. 
*/ def apply(system: ActorSystem): ClusterSingletonProxySettings = - apply(system.settings.config.getConfig("akka.cluster.singleton-proxy")) + apply(system.settings.config.getConfig("pekko.cluster.singleton-proxy")) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.singleton-proxy`. + * the default configuration `pekko.cluster.singleton-proxy`. */ def apply(config: Config): ClusterSingletonProxySettings = new ClusterSingletonProxySettings( @@ -47,13 +47,13 @@ object ClusterSingletonProxySettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.singleton-proxy`. + * `pekko.cluster.singleton-proxy`. */ def create(system: ActorSystem): ClusterSingletonProxySettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.singleton-proxy`. + * the default configuration `pekko.cluster.singleton-proxy`. */ def create(config: Config): ClusterSingletonProxySettings = apply(config) diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientHandoverSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientHandoverSpec.scala index 22a6ea87b1..c093eb3dea 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientHandoverSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientHandoverSpec.scala @@ -21,17 +21,17 @@ object ClusterClientHandoverSpec extends MultiNodeConfig { val first = role("first") val second = role("second") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.client { + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.client { heartbeat-interval = 1d 
acceptable-heartbeat-pause = 1d reconnect-timeout = 3s refresh-contacts-interval = 1d } - akka.test.filter-leeway = 10s + pekko.test.filter-leeway = 10s """).withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientSpec.scala index a7544e10a6..76a5f9396f 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientSpec.scala @@ -39,20 +39,20 @@ object ClusterClientSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s - akka.cluster.client.heartbeat-interval = 1s - akka.cluster.client.acceptable-heartbeat-pause = 3s - akka.cluster.client.refresh-contacts-interval = 1s + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s + pekko.cluster.client.heartbeat-interval = 1s + pekko.cluster.client.acceptable-heartbeat-pause = 3s + pekko.cluster.client.refresh-contacts-interval = 1s # number-of-contacts must be >= 4 because we shutdown all but one in the end - akka.cluster.client.receptionist.number-of-contacts = 4 - akka.cluster.client.receptionist.heartbeat-interval = 10s - akka.cluster.client.receptionist.acceptable-heartbeat-pause = 10s - akka.cluster.client.receptionist.failure-detection-interval = 1s - akka.test.filter-leeway = 10s + 
pekko.cluster.client.receptionist.number-of-contacts = 4 + pekko.cluster.client.receptionist.heartbeat-interval = 10s + pekko.cluster.client.receptionist.acceptable-heartbeat-pause = 10s + pekko.cluster.client.receptionist.failure-detection-interval = 1s + pekko.test.filter-leeway = 10s """)) testTransport(on = true) diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientStopSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientStopSpec.scala index 09bcc9f75e..668ca0292f 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientStopSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/client/ClusterClientStopSpec.scala @@ -23,17 +23,17 @@ object ClusterClientStopSpec extends MultiNodeConfig { val first = role("first") val second = role("second") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.client { + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.client { heartbeat-interval = 1s acceptable-heartbeat-pause = 1s reconnect-timeout = 3s receptionist.number-of-contacts = 1 } - akka.test.filter-leeway = 10s + pekko.test.filter-leeway = 10s """)) class Service extends Actor { diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorSpec.scala index 640579de5e..e690f18533 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorSpec.scala @@ -30,12 +30,12 @@ object DistributedPubSubMediatorSpec extends 
MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s - akka.cluster.pub-sub.max-delta-elements = 500 + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s + pekko.cluster.pub-sub.max-delta-elements = 500 """)) object TestChatUser { diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubRestartSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubRestartSpec.scala index 99cc0bbba6..a52edca46c 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubRestartSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubRestartSpec.scala @@ -31,12 +31,12 @@ object DistributedPubSubRestartSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.cluster.pub-sub.gossip-interval = 500ms - akka.actor.provider = cluster - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = off + pekko.loglevel = INFO + pekko.cluster.pub-sub.gossip-interval = 500ms + pekko.actor.provider = cluster + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = off """)) testTransport(on = true) @@ -142,8 +142,8 @@ class 
DistributedPubSubRestartSpec val newSystem = { val port = Cluster(system).selfAddress.port.get val config = ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port=$port - akka.remote.classic.netty.tcp.port=$port + pekko.remote.artery.canonical.port=$port + pekko.remote.classic.netty.tcp.port=$port """).withFallback(system.settings.config) ActorSystem(system.name, config) diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerChaosSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerChaosSpec.scala index 4059900cfa..b602b93a39 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerChaosSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerChaosSpec.scala @@ -35,11 +35,11 @@ object ClusterSingletonManagerChaosSpec extends MultiNodeConfig { val sixth = role("sixth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s """)) case object EchoStarted diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerDownedSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerDownedSpec.scala index 86debd5d98..20cd8ce37b 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerDownedSpec.scala +++ 
b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerDownedSpec.scala @@ -30,9 +30,9 @@ object ClusterSingletonManagerDownedSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off """)) testTransport(on = true) diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala index 9e3a907143..2e5e95cf46 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala @@ -29,23 +29,23 @@ object ClusterSingletonManagerLeaseSpec extends MultiNodeConfig { testTransport(true) commonConfig(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s test-lease { lease-class = ${classOf[TestLeaseActorClient].getName} heartbeat-interval = 1s heartbeat-timeout = 120s lease-operation-timeout = 3s } - akka.cluster.singleton { + pekko.cluster.singleton { use-lease = "test-lease" } """)) - nodeConfig(first, second, third)(ConfigFactory.parseString("akka.cluster.roles = 
[worker]")) + nodeConfig(first, second, third)(ConfigFactory.parseString("pekko.cluster.roles = [worker]")) object ImportantSingleton { case class Response(msg: Any, address: Address) extends JavaSerializable diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala index c217a51b42..1bceb42577 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala @@ -32,11 +32,11 @@ object ClusterSingletonManagerLeave2Spec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = off """)) case object EchoStarted diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala index 1bb82eccfa..8bb05d64eb 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala @@ -26,11 +26,11 @@ object ClusterSingletonManagerLeaveSpec extends 
MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = off """)) case object EchoStarted diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala index 014a7c5462..e2a2ed5890 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala @@ -26,12 +26,12 @@ object ClusterSingletonManagerPreparingForShutdownSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = off - akka.cluster.leader-actions-interval = 100ms + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = off + pekko.cluster.leader-actions-interval = 100ms """)) case object EchoStarted diff --git 
a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerSpec.scala index adf53697fa..426cf16dd3 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerSpec.scala @@ -39,15 +39,15 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { val sixth = role("sixth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s - akka.remote.artery.advanced.aeron.idle-cpu-level = 3 + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s + pekko.remote.artery.advanced.aeron.idle-cpu-level = 3 """)) - nodeConfig(first, second, third, fourth, fifth, sixth)(ConfigFactory.parseString("akka.cluster.roles =[worker]")) + nodeConfig(first, second, third, fourth, fifth, sixth)(ConfigFactory.parseString("pekko.cluster.roles =[worker]")) // #singleton-message-classes object PointToPointChannel { diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerStartupSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerStartupSpec.scala index ecfb3d1c88..271e73e6cb 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerStartupSpec.scala +++ 
b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/ClusterSingletonManagerStartupSpec.scala @@ -27,11 +27,11 @@ object ClusterSingletonManagerStartupSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s """)) case object EchoStarted diff --git a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/MultiDcSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/MultiDcSingletonManagerSpec.scala index afeec20858..a1495d2f0e 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/MultiDcSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/org/apache/pekko/cluster/singleton/MultiDcSingletonManagerSpec.scala @@ -23,24 +23,24 @@ object MultiDcSingletonManagerSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off""")) + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off""")) nodeConfig(controller) { ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = one - akka.cluster.roles = []""") + pekko.cluster.multi-data-center.self-data-center = one + pekko.cluster.roles = []""") } nodeConfig(first) { ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = one - akka.cluster.roles = [ worker ]""") + 
pekko.cluster.multi-data-center.self-data-center = one + pekko.cluster.roles = [ worker ]""") } nodeConfig(second, third) { ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = two - akka.cluster.roles = [ worker ]""") + pekko.cluster.multi-data-center.self-data-center = two + pekko.cluster.roles = [ worker ]""") } } diff --git a/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/client/ClusterClientTest.java b/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/client/ClusterClientTest.java index 2fab662d63..2248156882 100644 --- a/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/client/ClusterClientTest.java +++ b/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/client/ClusterClientTest.java @@ -22,9 +22,9 @@ public class ClusterClientTest extends JUnitSuite { new AkkaJUnitActorSystemResource( "DistributedPubSubMediatorTest", ConfigFactory.parseString( - "akka.actor.provider = \"cluster\"\n" - + "akka.remote.classic.netty.tcp.port=0\n" - + "akka.remote.artery.canonical.port=0")); + "pekko.actor.provider = \"cluster\"\n" + + "pekko.remote.classic.netty.tcp.port=0\n" + + "pekko.remote.artery.canonical.port=0")); private final ActorSystem system = actorSystemResource.getSystem(); diff --git a/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorTest.java b/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorTest.java index 8fb2631c8f..178f390191 100644 --- a/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorTest.java +++ b/akka-cluster-tools/src/test/java/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorTest.java @@ -26,9 +26,9 @@ public class DistributedPubSubMediatorTest extends JUnitSuite { new AkkaJUnitActorSystemResource( "DistributedPubSubMediatorTest", ConfigFactory.parseString( - "akka.actor.provider = \"cluster\"\n" - + "akka.remote.classic.netty.tcp.port=0\n" - + 
"akka.remote.artery.canonical.port=0")); + "pekko.actor.provider = \"cluster\"\n" + + "pekko.remote.classic.netty.tcp.port=0\n" + + "pekko.remote.artery.canonical.port=0")); private final ActorSystem system = actorSystemResource.getSystem(); diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala index d0c1238ac1..01a500704e 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala @@ -14,12 +14,12 @@ import pekko.testkit._ object DistributedPubSubMediatorDeadLettersSpec { def config(sendToDeadLettersWhenNoSubscribers: Boolean) = s""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port=0 - akka.remote.artery.canonical.port=0 - akka.remote.log-remote-lifecycle-events = off - akka.cluster.pub-sub.send-to-dead-letters-when-no-subscribers = $sendToDeadLettersWhenNoSubscribers + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port=0 + pekko.remote.artery.canonical.port=0 + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.pub-sub.send-to-dead-letters-when-no-subscribers = $sendToDeadLettersWhenNoSubscribers """ } diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala index 33504be168..8fef3aef43 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala @@ -20,12 
+20,12 @@ case class UnwrappedMessage(msg: String) object DistributedPubSubMediatorRouterSpec { def config(routingLogic: String) = s""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port=0 - akka.remote.artery.canonical.port=0 - akka.remote.log-remote-lifecycle-events = off - akka.cluster.pub-sub.routing-logic = $routingLogic + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port=0 + pekko.remote.artery.canonical.port=0 + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.pub-sub.routing-logic = $routingLogic """ } @@ -127,7 +127,7 @@ class DistributedPubSubMediatorWithHashRouterSpec val config = ConfigFactory .parseString(DistributedPubSubMediatorRouterSpec.config("random")) .withFallback(system.settings.config) - .getConfig("akka.cluster.pub-sub") + .getConfig("pekko.cluster.pub-sub") DistributedPubSubSettings(config).withRoutingLogic(ConsistentHashingRoutingLogic(system)) } } diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeaseSpec.scala b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeaseSpec.scala index fd647e5986..18994b4d82 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeaseSpec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeaseSpec.scala @@ -46,10 +46,10 @@ class ImportantSingleton(lifeCycleProbe: ActorRef) extends Actor with ActorLoggi } class ClusterSingletonLeaseSpec extends AkkaSpec(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = cluster + pekko.loglevel = INFO + pekko.actor.provider = cluster - akka.cluster.singleton { + pekko.cluster.singleton { use-lease = "test-lease" lease-retry-interval = 2000ms } diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala 
b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala index 83b9c20517..81167048b0 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala @@ -44,17 +44,17 @@ object ClusterSingletonLeavingSpeedSpec { class ClusterSingletonLeavingSpeedSpec extends AkkaSpec( """ - akka.loglevel = DEBUG - akka.actor.provider = org.apache.pekko.cluster.ClusterActorRefProvider - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 2s + pekko.loglevel = DEBUG + pekko.actor.provider = org.apache.pekko.cluster.ClusterActorRefProvider + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 2s # With 10 systems and setting min-number-of-hand-over-retries to 5 and gossip-interval to 2s it's possible to # reproduce the ClusterSingletonManagerIsStuck and slow hand over in issue #25639 - # akka.cluster.singleton.min-number-of-hand-over-retries = 5 - # akka.cluster.gossip-interval = 2s + # pekko.cluster.singleton.min-number-of-hand-over-retries = 5 + # pekko.cluster.gossip-interval = 2s - akka.remote { + pekko.remote { classic.netty.tcp { hostname = "127.0.0.1" port = 0 @@ -67,7 +67,7 @@ class ClusterSingletonLeavingSpeedSpec """) { private val systems = (1 to 3).map { n => - val roleConfig = ConfigFactory.parseString(s"""akka.cluster.roles=[role-${n % 3}]""") + val roleConfig = ConfigFactory.parseString(s"""pekko.cluster.roles=[role-${n % 3}]""") ActorSystem(system.name, roleConfig.withFallback(system.settings.config)) } private val probes = systems.map(TestProbe()(_)) diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxySpec.scala 
b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxySpec.scala index 8e273bbe4a..380aaec751 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxySpec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonProxySpec.scala @@ -69,7 +69,7 @@ object ClusterSingletonProxySpec { } val cfg = """ - akka { + pekko { loglevel = INFO cluster.jmx.enabled = off actor.provider = "cluster" diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestart2Spec.scala b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestart2Spec.scala index ba40f59c2c..cb715cce58 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestart2Spec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestart2Spec.scala @@ -31,13 +31,13 @@ object ClusterSingletonRestart2Spec { class ClusterSingletonRestart2Spec extends AkkaSpec(""" - akka.loglevel = INFO - akka.cluster.roles = [singleton] - akka.actor.provider = org.apache.pekko.cluster.ClusterActorRefProvider - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 2s - akka.cluster.singleton.min-number-of-hand-over-retries = 5 - akka.remote { + pekko.loglevel = INFO + pekko.cluster.roles = [singleton] + pekko.actor.provider = org.apache.pekko.cluster.ClusterActorRefProvider + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 2s + pekko.cluster.singleton.min-number-of-hand-over-retries = 5 + pekko.remote { classic.netty.tcp { hostname = "127.0.0.1" port = 0 @@ -47,7 +47,7 @@ class ClusterSingletonRestart2Spec port = 0 } } - akka.actor.serialization-bindings { + 
pekko.actor.serialization-bindings { # there is no serializer for UniqueAddress, not intended to be sent as a standalone message "org.apache.pekko.cluster.UniqueAddress" = jackson-cbor } @@ -57,7 +57,7 @@ class ClusterSingletonRestart2Spec val sys2 = ActorSystem(system.name, system.settings.config) val sys3 = ActorSystem( system.name, - ConfigFactory.parseString("akka.cluster.roles = [other]").withFallback(system.settings.config)) + ConfigFactory.parseString("pekko.cluster.roles = [other]").withFallback(system.settings.config)) var sys4: ActorSystem = null import pekko.util.ccompat._ @@ -110,8 +110,8 @@ class ClusterSingletonRestart2Spec val sys4Config = ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port=$sys2port - akka.remote.classic.netty.tcp.port=$sys2port + pekko.remote.artery.canonical.port=$sys2port + pekko.remote.classic.netty.tcp.port=$sys2port """).withFallback(system.settings.config) ActorSystem(system.name, sys4Config) diff --git a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestartSpec.scala b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestartSpec.scala index be7464068c..affda39aa4 100644 --- a/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestartSpec.scala +++ b/akka-cluster-tools/src/test/scala/org/apache/pekko/cluster/singleton/ClusterSingletonRestartSpec.scala @@ -19,11 +19,11 @@ import pekko.testkit.TestProbe class ClusterSingletonRestartSpec extends AkkaSpec(""" - akka.loglevel = INFO - akka.actor.provider = org.apache.pekko.cluster.ClusterActorRefProvider - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 2s - akka.remote { + pekko.loglevel = INFO + pekko.actor.provider = org.apache.pekko.cluster.ClusterActorRefProvider + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + 
pekko.cluster.testkit.auto-down-unreachable-after = 2s + pekko.remote { classic.netty.tcp { hostname = "127.0.0.1" port = 0 @@ -81,8 +81,8 @@ class ClusterSingletonRestartSpec val sys3Config = ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port=$sys1port - akka.remote.classic.netty.tcp.port=$sys1port + pekko.remote.artery.canonical.port=$sys1port + pekko.remote.classic.netty.tcp.port=$sys1port """).withFallback(system.settings.config) ActorSystem(system.name, sys3Config) diff --git a/akka-cluster-typed/src/main/resources/reference.conf b/akka-cluster-typed/src/main/resources/reference.conf index 15400e0e3c..064fcf7d4e 100644 --- a/akka-cluster-typed/src/main/resources/reference.conf +++ b/akka-cluster-typed/src/main/resources/reference.conf @@ -1,11 +1,11 @@ ############################################ -# Akka Cluster Typed Reference Config File # +# Pekko Cluster Typed Reference Config File # ############################################ # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -akka.cluster.typed.receptionist { +pekko.cluster.typed.receptionist { # Updates with Distributed Data are done with this consistency level. # Possible values: local, majority, all, 2, 3, 4 (n) write-consistency = local @@ -26,13 +26,13 @@ akka.cluster.typed.receptionist { distributed-key-count = 5 # Settings for the Distributed Data replicator used by Receptionist. - # Same layout as akka.cluster.distributed-data. - distributed-data = ${akka.cluster.distributed-data} + # Same layout as pekko.cluster.distributed-data. + distributed-data = ${pekko.cluster.distributed-data} # make sure that by default it's for all roles (Play loads config in different way) distributed-data.role = "" } -akka.cluster.ddata.typed { +pekko.cluster.ddata.typed { # The timeout to use for ask operations in ReplicatorMessageAdapter. 
# This should be longer than the timeout given in Replicator.WriteConsistency and # Replicator.ReadConsistency. The replicator will always send a reply within those @@ -44,7 +44,7 @@ akka.cluster.ddata.typed { replicator-message-adapter-unexpected-ask-timeout = 20 s } -akka { +pekko { actor { serialization-identifiers { "org.apache.pekko.cluster.typed.internal.AkkaClusterTypedSerializer" = 28 diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala index 3be3953069..3b4d19285d 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala @@ -62,7 +62,7 @@ object DistributedData extends ExtensionId[DistributedData] { /** * Akka extension for convenient configuration and use of the * [[Replicator]]. Configuration settings are defined in the - * `akka.cluster.ddata` section, see `reference.conf`. + * `pekko.cluster.ddata` section, see `reference.conf`. * * This is using the same underlying `Replicator` instance as * [[pekko.cluster.ddata.DistributedData]] and that means that typed diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorSettings.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorSettings.scala index 2a1d501f7c..ad7e3445b2 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorSettings.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorSettings.scala @@ -15,14 +15,14 @@ object ReplicatorSettings { /** * Create settings from the default configuration - * `akka.cluster.distributed-data`. + * `pekko.cluster.distributed-data`. 
*/ def create(system: ActorSystem[_]): dd.ReplicatorSettings = dd.ReplicatorSettings(system.toClassic) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.distributed-data`. + * the default configuration `pekko.cluster.distributed-data`. */ def create(config: Config): dd.ReplicatorSettings = dd.ReplicatorSettings(config) diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/DistributedData.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/DistributedData.scala index 35d5cc8caf..e954064488 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/DistributedData.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/DistributedData.scala @@ -61,7 +61,7 @@ object DistributedData extends ExtensionId[DistributedData] { /** * Akka extension for convenient configuration and use of the * [[Replicator]]. Configuration settings are defined in the - * `akka.cluster.ddata` section, see `reference.conf`. + * `pekko.cluster.ddata` section, see `reference.conf`. 
* * This is using the same underlying `Replicator` instance as * [[pekko.cluster.ddata.DistributedData]] and that means that typed @@ -75,7 +75,7 @@ class DistributedData(system: ActorSystem[_]) extends Extension { /** INTERNAL API */ @InternalApi private[pekko] val unexpectedAskTimeout: FiniteDuration = system.settings.config - .getDuration("akka.cluster.ddata.typed.replicator-message-adapter-unexpected-ask-timeout") + .getDuration("pekko.cluster.ddata.typed.replicator-message-adapter-unexpected-ask-timeout") .asScala private val classicSystem = system.toClassic.asInstanceOf[ExtendedActorSystem] diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala index 77756ed261..ee0fececdc 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala @@ -19,14 +19,14 @@ object ReplicatorSettings { /** * Create settings from the default configuration - * `akka.cluster.distributed-data`. + * `pekko.cluster.distributed-data`. */ def apply(system: ActorSystem[_]): ReplicatorSettings = dd.ReplicatorSettings(system.toClassic) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.distributed-data`. + * the default configuration `pekko.cluster.distributed-data`. 
*/ def apply(config: Config): ReplicatorSettings = dd.ReplicatorSettings(config) diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala index 60ad7a9330..1d67f78ce9 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala @@ -21,7 +21,7 @@ import com.typesafe.config.Config object ClusterSingletonSettings { def apply(system: ActorSystem[_]): ClusterSingletonSettings = - fromConfig(system.settings.config.getConfig("akka.cluster")) + fromConfig(system.settings.config.getConfig("pekko.cluster")) /** * Java API @@ -225,15 +225,15 @@ object ClusterSingletonManagerSettings { /** * Create settings from the default configuration - * `akka.cluster.singleton`. + * `pekko.cluster.singleton`. */ def apply(system: ActorSystem[_]): ClusterSingletonManagerSettings = - apply(system.settings.config.getConfig("akka.cluster.singleton")) + apply(system.settings.config.getConfig("pekko.cluster.singleton")) .withRemovalMargin(pekko.cluster.Cluster(system).downingProvider.downRemovalMargin) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.singleton`. + * the default configuration `pekko.cluster.singleton`. */ def apply(config: Config): ClusterSingletonManagerSettings = { val lease = config.getString("use-lease") match { @@ -251,13 +251,13 @@ object ClusterSingletonManagerSettings { /** * Java API: Create settings from the default configuration - * `akka.cluster.singleton`. + * `pekko.cluster.singleton`. */ def create(system: ActorSystem[_]): ClusterSingletonManagerSettings = apply(system) /** * Java API: Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.singleton`. 
+ * the default configuration `pekko.cluster.singleton`. */ def create(config: Config): ClusterSingletonManagerSettings = apply(config) diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala index ab80e6d1d4..4ed36408af 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala @@ -18,7 +18,7 @@ import pekko.cluster.{ ConfigValidation, JoinConfigCompatChecker, Valid } @InternalApi private[pekko] final class ClusterReceptionistConfigCompatChecker extends JoinConfigCompatChecker { - override def requiredKeys = "akka.cluster.typed.receptionist.distributed-key-count" :: Nil + override def requiredKeys = "pekko.cluster.typed.receptionist.distributed-key-count" :: Nil override def check(toCheck: Config, actualConfig: Config): ConfigValidation = if (toCheck.hasPath(requiredKeys.head)) diff --git a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala index b87d9ae0a3..05ff210e89 100644 --- a/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala +++ b/akka-cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala @@ -22,7 +22,7 @@ import pekko.util.Helpers.toRootLowerCase @InternalApi private[pekko] object ClusterReceptionistSettings { def apply(system: ActorSystem[_]): ClusterReceptionistSettings = - 
apply(system.settings.config.getConfig("akka.cluster.typed.receptionist")) + apply(system.settings.config.getConfig("pekko.cluster.typed.receptionist")) def apply(config: Config): ClusterReceptionistSettings = { val writeTimeout = 5.seconds // the timeout is not important diff --git a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/ChunkLargeMessageSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/ChunkLargeMessageSpec.scala index 14eda19301..cf1d061de9 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/ChunkLargeMessageSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/ChunkLargeMessageSpec.scala @@ -28,9 +28,9 @@ object ChunkLargeMessageSpec extends MultiNodeConfig { val second = role("second") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - #akka.serialization.jackson.verbose-debug-logging = on - akka.remote.artery { + pekko.loglevel = INFO + #pekko.serialization.jackson.verbose-debug-logging = on + pekko.remote.artery { advanced.inbound-lanes = 1 advanced.maximum-frame-size = 2 MB } diff --git a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/MultiDcClusterSingletonSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/MultiDcClusterSingletonSpec.scala index 768315e903..c4d68dc58c 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/MultiDcClusterSingletonSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/MultiDcClusterSingletonSpec.scala @@ -21,15 +21,15 @@ object MultiDcClusterSingletonSpecConfig extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + pekko.loglevel = DEBUG """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc1" + 
pekko.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(second, third)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc2" + pekko.cluster.multi-data-center.self-data-center = "dc2" """)) testTransport(on = true) diff --git a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/PubSubSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/PubSubSpec.scala index 24cd73706c..5faeaa30e7 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/PubSubSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/PubSubSpec.scala @@ -23,15 +23,15 @@ object PubSubSpecConfig extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO + pekko.loglevel = INFO """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc1" + pekko.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(second, third)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc2" + pekko.cluster.multi-data-center.self-data-center = "dc2" """)) case class Message(msg: String) extends CborSerializable diff --git a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala index 2262766ddb..35879ce0ef 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/org/apache/pekko/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala @@ -28,7 +28,7 @@ object ClusterReceptionistUnreachabilitySpecConfig extends MultiNodeConfig { val third = 
role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO + pekko.loglevel = INFO """).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) diff --git a/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorDocTest.java b/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorDocTest.java index 86686e3457..5f3170b34c 100644 --- a/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorDocTest.java +++ b/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorDocTest.java @@ -29,10 +29,10 @@ public class ReplicatorDocTest extends JUnitSuite { static Config config = ConfigFactory.parseString( - "akka.actor.provider = cluster \n" - + "akka.remote.classic.netty.tcp.port = 0 \n" - + "akka.remote.artery.canonical.port = 0 \n" - + "akka.remote.artery.canonical.hostname = 127.0.0.1 \n"); + "pekko.actor.provider = cluster \n" + + "pekko.remote.classic.netty.tcp.port = 0 \n" + + "pekko.remote.artery.canonical.port = 0 \n" + + "pekko.remote.artery.canonical.hostname = 127.0.0.1 \n"); @ClassRule public static TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/typed/BasicClusterExampleTest.java b/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/typed/BasicClusterExampleTest.java index b27d072bb7..a9e4e49d03 100644 --- a/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/typed/BasicClusterExampleTest.java +++ b/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/typed/BasicClusterExampleTest.java @@ -31,7 +31,7 @@ public class BasicClusterExampleTest { // extends JUnitSuite { private Config clusterConfig = ConfigFactory.parseString( - "akka { \n" + "pekko { \n" + " actor.provider = cluster \n" + " remote.artery { \n" + " canonical { \n" @@ -43,8 +43,8 @@ 
public class BasicClusterExampleTest { // extends JUnitSuite { private Config noPort = ConfigFactory.parseString( - " akka.remote.classic.netty.tcp.port = 0 \n" - + " akka.remote.artery.canonical.port = 0 \n"); + " pekko.remote.classic.netty.tcp.port = 0 \n" + + " pekko.remote.artery.canonical.port = 0 \n"); // @Test public void clusterApiExample() { diff --git a/akka-cluster-typed/src/test/java/org/apache/pekko/cluster/typed/ClusterApiTest.java b/akka-cluster-typed/src/test/java/org/apache/pekko/cluster/typed/ClusterApiTest.java index e3e57195d6..9a53f98ca1 100644 --- a/akka-cluster-typed/src/test/java/org/apache/pekko/cluster/typed/ClusterApiTest.java +++ b/akka-cluster-typed/src/test/java/org/apache/pekko/cluster/typed/ClusterApiTest.java @@ -20,13 +20,13 @@ public class ClusterApiTest extends JUnitSuite { public void joinLeaveAndObserve() throws Exception { Config config = ConfigFactory.parseString( - "akka.actor.provider = cluster \n" - + "akka.remote.classic.netty.tcp.port = 0 \n" - + "akka.remote.artery.canonical.port = 0 \n" - + "akka.remote.artery.canonical.hostname = 127.0.0.1 \n" - + "akka.cluster.jmx.multi-mbeans-in-same-jvm = on \n" - + "akka.coordinated-shutdown.terminate-actor-system = off \n" - + "akka.coordinated-shutdown.run-by-actor-system-terminate = off \n"); + "pekko.actor.provider = cluster \n" + + "pekko.remote.classic.netty.tcp.port = 0 \n" + + "pekko.remote.artery.canonical.port = 0 \n" + + "pekko.remote.artery.canonical.hostname = 127.0.0.1 \n" + + "pekko.cluster.jmx.multi-mbeans-in-same-jvm = on \n" + + "pekko.coordinated-shutdown.terminate-actor-system = off \n" + + "pekko.coordinated-shutdown.run-by-actor-system-terminate = off \n"); ActorSystem system1 = ActorSystem.wrap(org.apache.pekko.actor.ActorSystem.create("ClusterApiTest", config)); diff --git a/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala 
b/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala index e637726bfd..e1f980cc0c 100644 --- a/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala @@ -27,10 +27,10 @@ import pekko.cluster.ddata.typed.scaladsl.Replicator._ object ReplicatorDocSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 """) // #sample diff --git a/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/BasicClusterExampleSpec.scala b/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/BasicClusterExampleSpec.scala index 63adaf1c98..1114e1ff58 100644 --- a/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/BasicClusterExampleSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/BasicClusterExampleSpec.scala @@ -28,9 +28,9 @@ import scala.concurrent.duration._ object BasicClusterExampleSpec { val configSystem1 = ConfigFactory.parseString(s""" -akka.loglevel = DEBUG +pekko.loglevel = DEBUG #config-seeds -akka { +pekko { actor { provider = "cluster" } @@ -53,8 +53,8 @@ akka { """) val configSystem2 = ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """).withFallback(configSystem1) def illustrateJoinSeedNodes(): Unit = { @@ -121,10 +121,10 @@ class BasicClusterConfigSpec extends AnyWordSpec with ScalaFutures with 
Eventual val sys1Port = SocketUtil.temporaryLocalPort() val sys2Port = SocketUtil.temporaryLocalPort() def config(port: Int) = ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = $port - akka.remote.artery.canonical.port = $port - akka.cluster.jmx.multi-mbeans-in-same-jvm = on - akka.cluster.seed-nodes = [ "akka://ClusterSystem@127.0.0.1:$sys1Port", "akka://ClusterSystem@127.0.0.1:$sys2Port" ] + pekko.remote.classic.netty.tcp.port = $port + pekko.remote.artery.canonical.port = $port + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.cluster.seed-nodes = [ "akka://ClusterSystem@127.0.0.1:$sys1Port", "akka://ClusterSystem@127.0.0.1:$sys2Port" ] """) val system1 = @@ -145,10 +145,10 @@ class BasicClusterConfigSpec extends AnyWordSpec with ScalaFutures with Eventual object BasicClusterManualSpec { val clusterConfig = ConfigFactory.parseString(s""" -akka.loglevel = DEBUG -akka.cluster.jmx.multi-mbeans-in-same-jvm = on +pekko.loglevel = DEBUG +pekko.cluster.jmx.multi-mbeans-in-same-jvm = on #config -akka { +pekko { actor.provider = "cluster" remote.artery { canonical { @@ -161,8 +161,8 @@ akka { """) val noPort = ConfigFactory.parseString(""" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """) } diff --git a/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/DistributedPubSubExample.scala b/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/DistributedPubSubExample.scala index d88d7488f1..2d075aa2f1 100644 --- a/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/DistributedPubSubExample.scala +++ b/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/DistributedPubSubExample.scala @@ -287,14 +287,14 @@ object DistributedPubSubExample { import Ontology._ val config: Config = ConfigFactory.parseString(s""" - akka.actor.provider = "cluster" - 
akka.cluster.pub-sub.max-delta-elements = 500 - akka.cluster.jmx.enabled = off - akka.remote.artery.canonical.hostname = 127.0.0.1 - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.loglevel = INFO - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.actor.provider = "cluster" + pekko.cluster.pub-sub.max-delta-elements = 500 + pekko.cluster.jmx.enabled = off + pekko.remote.artery.canonical.hostname = 127.0.0.1 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.loglevel = INFO + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """) def createCluster(nodes: List[ActorSystem[_]]): Unit = { diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorRefIgnoreSerializationSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorRefIgnoreSerializationSpec.scala index a44f17d8b9..e88db0aca2 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorRefIgnoreSerializationSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorRefIgnoreSerializationSpec.scala @@ -23,7 +23,7 @@ class ActorRefIgnoreSerializationSpec extends AnyWordSpec with ScalaFutures with private var system2: ActorSystem[String] = _ val config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = debug actor.provider = cluster remote.classic.netty.tcp.port = 0 diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorSystemSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorSystemSpec.scala index be2cce4828..64279b42e8 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorSystemSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ActorSystemSpec.scala @@ -69,17 +69,17 @@ class ActorSystemSpec with Eventually with LogCapturing { - private val testKitSettings = 
TestKitSettings(ConfigFactory.load().getConfig("akka.actor.testkit.typed")) + private val testKitSettings = TestKitSettings(ConfigFactory.load().getConfig("pekko.actor.testkit.typed")) override implicit val patienceConfig: PatienceConfig = PatienceConfig(testKitSettings.SingleExpectDefaultTimeout, Span(100, org.scalatest.time.Millis)) val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.actor { + pekko.actor { serializers { test = "org.apache.pekko.cluster.typed.ActorSystemSpec$TestSerializer" } diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterActorLoggingSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterActorLoggingSpec.scala index 61fa168863..f8ba204277 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterActorLoggingSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterActorLoggingSpec.scala @@ -18,10 +18,10 @@ import pekko.actor.typed.scaladsl.Behaviors object ClusterActorLoggingSpec { def config = ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 """) } diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterApiSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterApiSpec.scala index f7f27ec1f1..98b76d1641 100644 --- 
a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterApiSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterApiSpec.scala @@ -21,17 +21,17 @@ import pekko.cluster.MemberStatus object ClusterApiSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 - akka.cluster.jmx.multi-mbeans-in-same-jvm = on - akka.coordinated-shutdown.terminate-actor-system = off - akka.coordinated-shutdown.run-by-actor-system-terminate = off + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.run-by-actor-system-terminate = off # generous timeout for cluster forming probes - akka.actor.testkit.typed.default-timeout = 10s + pekko.actor.testkit.typed.default-timeout = 10s # disable this or we cannot be sure to observe node end state on the leaving side - akka.cluster.run-coordinated-shutdown-when-down = off + pekko.cluster.run-coordinated-shutdown-when-down = off """) } diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterDispatcherSelectorSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterDispatcherSelectorSpec.scala index 3fe292a03f..190551a85f 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterDispatcherSelectorSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterDispatcherSelectorSpec.scala @@ -10,7 +10,7 @@ import org.apache.pekko.actor.typed.scaladsl.DispatcherSelectorSpec class ClusterDispatcherSelectorSpec extends DispatcherSelectorSpec(ConfigFactory.parseString(""" - akka.actor.provider = 
cluster + pekko.actor.provider = cluster """).withFallback(DispatcherSelectorSpec.config)) { // same tests as in DispatcherSelectorSpec diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterSingletonApiSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterSingletonApiSpec.scala index 79d0a2e8d1..f9f5091779 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterSingletonApiSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/ClusterSingletonApiSpec.scala @@ -23,11 +23,11 @@ import pekko.serialization.jackson.CborSerializable object ClusterSingletonApiSpec { val config = ConfigFactory.parseString(s""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 - akka.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on """) sealed trait PingProtocol @@ -61,7 +61,7 @@ class ClusterSingletonApiSpec val system2 = pekko.actor.ActorSystem( system.name, ConfigFactory.parseString(""" - akka.cluster.roles = ["singleton"] + pekko.cluster.roles = ["singleton"] """).withFallback(system.settings.config)) val adaptedSystem2 = system2.toTyped val clusterNode2 = Cluster(adaptedSystem2) diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/GroupRouterSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/GroupRouterSpec.scala index 627b056434..696d2c02ce 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/GroupRouterSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/GroupRouterSpec.scala @@ -23,7 +23,7 @@ import scala.concurrent.duration._ object 
GroupRouterSpec { def config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = debug actor.provider = cluster remote.classic.netty.tcp.port = 0 diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteContextAskSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteContextAskSpec.scala index 84a977902b..cb8ec463ee 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteContextAskSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteContextAskSpec.scala @@ -26,7 +26,7 @@ import pekko.util.Timeout object RemoteContextAskSpec { def config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = debug actor.provider = cluster remote.classic.netty.tcp.port = 0 diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteDeployNotAllowedSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteDeployNotAllowedSpec.scala index 388ba26770..bdc113a2f2 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteDeployNotAllowedSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteDeployNotAllowedSpec.scala @@ -17,7 +17,7 @@ import pekko.actor.typed.scaladsl.Behaviors object RemoteDeployNotAllowedSpec { def config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = warning actor { provider = cluster @@ -34,7 +34,7 @@ object RemoteDeployNotAllowedSpec { """) def configWithRemoteDeployment(otherSystemPort: Int) = ConfigFactory.parseString(s""" - akka.actor.deployment { + pekko.actor.deployment { "/*" { remote = "akka://sampleActorSystem@127.0.0.1:$otherSystemPort" } diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteMessageSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteMessageSpec.scala index 88d4996ffd..e4753f6d4b 100644 --- 
a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteMessageSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/RemoteMessageSpec.scala @@ -20,7 +20,7 @@ import pekko.testkit.AkkaSpec object RemoteMessageSpec { def config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = debug actor.provider = cluster remote.classic.netty.tcp.port = 0 diff --git a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala index ad840990a0..4d9d051fb1 100644 --- a/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala +++ b/akka-cluster-typed/src/test/scala/org/apache/pekko/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala @@ -38,26 +38,26 @@ import pekko.testkit.GHExcludeAeronTest object ClusterReceptionistSpec { val config = ConfigFactory.parseString(s""" - akka.loglevel = DEBUG # issue #24960 - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.classic.netty.tcp.host = 127.0.0.1 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.loglevel = DEBUG # issue #24960 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.classic.netty.tcp.host = 127.0.0.1 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.remote.retry-gate-closed-for = 1 s + pekko.remote.retry-gate-closed-for = 1 s - akka.cluster.typed.receptionist { + pekko.cluster.typed.receptionist { pruning-interval = 1 s } - akka.cluster { + pekko.cluster { jmx.multi-mbeans-in-same-jvm = on failure-detector.acceptable-heartbeat-pause = 3s } # test coverage that the durable store is not used - akka.cluster.distributed-data.durable.keys = ["*"] + 
pekko.cluster.distributed-data.durable.keys = ["*"] """) case object Pong extends CborSerializable @@ -365,10 +365,10 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin val testKit3 = ActorTestKit( system1.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${clusterNode2.selfMember.address.port.get} - akka.remote.artery.canonical.port = ${clusterNode2.selfMember.address.port.get} + pekko.remote.classic.netty.tcp.port = ${clusterNode2.selfMember.address.port.get} + pekko.remote.artery.canonical.port = ${clusterNode2.selfMember.address.port.get} # retry joining when existing member removed - akka.cluster.retry-unsuccessful-join-after = 1s + pekko.cluster.retry-unsuccessful-join-after = 1s """).withFallback(config)) try { @@ -433,10 +433,10 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin val testKit1 = ActorTestKit( "ClusterReceptionistSpec-test-7", ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { failure-detector.acceptable-heartbeat-pause = 20s } - akka.cluster.typed.receptionist { + pekko.cluster.typed.receptionist { # it can be stressed more by using all write-consistency = all } @@ -480,8 +480,8 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin val testKit3 = ActorTestKit( system1.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${clusterNode2.selfMember.address.port.get} - akka.remote.artery.canonical.port = ${clusterNode2.selfMember.address.port.get} + pekko.remote.classic.netty.tcp.port = ${clusterNode2.selfMember.address.port.get} + pekko.remote.artery.canonical.port = ${clusterNode2.selfMember.address.port.get} """).withFallback(config)) try { @@ -537,7 +537,7 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin val config = ConfigFactory.parseString(""" # disable delta propagation so we can have repeatable concurrent writes # without delta reaching between 
nodes already - akka.cluster.distributed-data.delta-crdt.enabled=false + pekko.cluster.distributed-data.delta-crdt.enabled=false """).withFallback(ClusterReceptionistSpec.config) val testKit1 = ActorTestKit("ClusterReceptionistSpec-test-8", config) val system1 = testKit1.system @@ -788,10 +788,10 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin // It's possible that the registry entry from the ddata update arrives before MemberJoined. val config = ConfigFactory.parseString(""" # quick dissemination to increase the chance of the race condition - akka.cluster.typed.receptionist.distributed-data.write-consistency = all - akka.cluster.typed.receptionist.distributed-data.gossip-interval = 500ms + pekko.cluster.typed.receptionist.distributed-data.write-consistency = all + pekko.cluster.typed.receptionist.distributed-data.gossip-interval = 500ms # run the RemoveTick cleanup often to exercise that scenario - akka.cluster.typed.receptionist.pruning-interval = 50ms + pekko.cluster.typed.receptionist.pruning-interval = 50ms """).withFallback(ClusterReceptionistSpec.config) val numberOfNodes = 6 // use 9 or more to stress it more val testKits = Vector.fill(numberOfNodes)(ActorTestKit("ClusterReceptionistSpec-13", config)) diff --git a/akka-cluster/jmx-client/akka-cluster b/akka-cluster/jmx-client/akka-cluster index 684a3d0514..55df10b321 100755 --- a/akka-cluster/jmx-client/akka-cluster +++ b/akka-cluster/jmx-client/akka-cluster @@ -201,7 +201,7 @@ do *) printf "Usage: $0 ...\n" printf "\n" - printf "-p parameter needs is needed when cluster is run with akka.cluster.jmx.multi-mbeans-in-same-jvm = on.¥n" + printf "-p parameter is needed when cluster is run with pekko.cluster.jmx.multi-mbeans-in-same-jvm = on.\n" printf "\n" printf "Supported commands are:\n" printf "%26s - %s\n" "join " "Sends request a JOIN node with the specified URL" diff --git a/akka-cluster/src/main/resources/reference.conf
b/akka-cluster/src/main/resources/reference.conf index 65890b55a0..1073a62d09 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -1,11 +1,11 @@ ###################################### -# Akka Cluster Reference Config File # +# Pekko Cluster Reference Config File # ###################################### # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -akka { +pekko { cluster { # Initial contact points of the cluster. @@ -128,8 +128,8 @@ akka { jmx.enabled = on # Enable or disable multiple JMX MBeans in the same JVM - # If this is disabled, the MBean Object name is "akka:type=Cluster" - # If this is enabled, them MBean Object names become "akka:type=Cluster,port=$clusterPortNumber" + # If this is disabled, the MBean Object name is "pekko:type=Cluster" + # If this is enabled, the MBean Object names become "pekko:type=Cluster,port=$clusterPortNumber" jmx.multi-mbeans-in-same-jvm = off # how long should the node wait before starting the periodic tasks @@ -156,7 +156,7 @@ akka { # The id of the dispatcher to use for cluster actors. # If specified you need to define the settings of the actual dispatcher. - use-dispatcher = "akka.actor.internal-dispatcher" + use-dispatcher = "pekko.actor.internal-dispatcher" # Gossip to random node with newer or older state information, if any with # this probability. Otherwise Gossip to any random live node. @@ -227,7 +227,7 @@ akka { # Configures multi-dc specific heartbeating and other mechanisms, # many of them have a direct counter-part in "one datacenter mode", # in which case these settings would not be used at all - they only apply, - # if your cluster nodes are configured with at-least 2 different `akka.cluster.data-center` values. + # if your cluster nodes are configured with at-least 2 different `pekko.cluster.data-center` values. 
multi-data-center { # Defines which data center this node belongs to. It is typically used to make islands of the @@ -275,7 +275,7 @@ akka { # If the tick-duration of the default scheduler is longer than the # tick-duration configured here a dedicated scheduler will be used for # periodic tasks of the cluster, otherwise the default scheduler is used. - # See akka.scheduler settings for more details. + # See pekko.scheduler settings for more details. scheduler { tick-duration = 33ms ticks-per-wheel = 512 @@ -307,7 +307,7 @@ akka { # Checkers defined in reference.conf can be disabled by application by using empty string value # for the named entry. checkers { - akka-cluster = "org.apache.pekko.cluster.JoinConfigCompatCheckCluster" + pekko-cluster = "org.apache.pekko.cluster.JoinConfigCompatCheckCluster" } # Some configuration properties might not be appropriate to transfer between nodes @@ -318,14 +318,14 @@ akka { # All properties starting with the paths defined here are excluded, i.e. you can add the path of a whole # section here to skip everything inside that section. sensitive-config-paths { - akka = [ + pekko = [ "user.home", "user.name", "user.dir", "socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts", - "akka.remote.secure-cookie", - "akka.remote.classic.netty.ssl.security", + "pekko.remote.secure-cookie", + "pekko.remote.classic.netty.ssl.security", # Pre 2.6 path, keep around to avoid sending things misconfigured with old paths - "akka.remote.netty.ssl.security", - "akka.remote.artery.ssl" + "pekko.remote.netty.ssl.security", + "pekko.remote.artery.ssl" ] } @@ -361,7 +361,7 @@ akka { # Use members with all specified roles, or all members if undefined or empty. use-roles = [] - # Deprecated, since Akka 2.5.4, replaced by use-roles + # Deprecated, since Akka 2.5.4, replaced by use-roles # Use members with specified role, or all members if undefined or empty. 
use-role = "" } @@ -369,12 +369,12 @@ akka { # Protobuf serializer for cluster messages actor { serializers { - akka-cluster = "org.apache.pekko.cluster.protobuf.ClusterMessageSerializer" + pekko-cluster = "org.apache.pekko.cluster.protobuf.ClusterMessageSerializer" } serialization-bindings { - "org.apache.pekko.cluster.ClusterMessage" = akka-cluster - "org.apache.pekko.cluster.routing.ClusterRouterPool" = akka-cluster + "org.apache.pekko.cluster.ClusterMessage" = pekko-cluster + "org.apache.pekko.cluster.routing.ClusterRouterPool" = pekko-cluster } serialization-identifiers { @@ -388,9 +388,9 @@ akka { #//#split-brain-resolver # To enable the split brain resolver you first need to enable the provider in your application.conf: -# akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" +# pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" -akka.cluster.split-brain-resolver { +pekko.cluster.split-brain-resolver { # Select one of the available strategies (see descriptions below): # static-quorum, keep-majority, keep-oldest, down-all, lease-majority active-strategy = keep-majority @@ -437,7 +437,7 @@ akka.cluster.split-brain-resolver { # consists of 3 nodes each, i.e. each side thinks it has enough nodes to continue by # itself. A warning is logged if this recommendation is violated. #//#static-quorum -akka.cluster.split-brain-resolver.static-quorum { +pekko.cluster.split-brain-resolver.static-quorum { # minimum number of nodes that the cluster must have quorum-size = undefined @@ -452,7 +452,7 @@ akka.cluster.split-brain-resolver.static-quorum { # Note that if there are more than two partitions and none is in majority each part # will shutdown itself, terminating the whole cluster. 
#//#keep-majority -akka.cluster.split-brain-resolver.keep-majority { +pekko.cluster.split-brain-resolver.keep-majority { # if the 'role' is defined the decision is based only on members with that 'role' role = "" } @@ -469,7 +469,7 @@ akka.cluster.split-brain-resolver.keep-majority { # when 'down-if-alone' is 'on', otherwise they will down themselves if the # oldest node crashes, i.e. shutdown the whole cluster together with the oldest node. #//#keep-oldest -akka.cluster.split-brain-resolver.keep-oldest { +pekko.cluster.split-brain-resolver.keep-oldest { # Enable downing of the oldest node when it is partitioned from all other nodes down-if-alone = on @@ -484,11 +484,11 @@ akka.cluster.split-brain-resolver.keep-oldest { # This is achieved by adding a delay before trying to acquire the lease on the # minority side. #//#lease-majority -akka.cluster.split-brain-resolver.lease-majority { +pekko.cluster.split-brain-resolver.lease-majority { lease-implementation = "" - # The recommended format for the lease name is "-akka-sbr". - # When lease-name is not defined, the name will be set to "-akka-sbr" + # The recommended format for the lease name is "-pekko-sbr". 
+ # When lease-name is not defined, the name will be set to "-pekko-sbr" lease-name = "" # This delay is used on the minority side before trying to acquire the lease, diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/Cluster.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/Cluster.scala index 77ab4a0d76..863fef02dd 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/Cluster.scala @@ -47,7 +47,7 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { * INTERNAL API */ private[cluster] final val isAssertInvariantsEnabled: Boolean = - System.getProperty("akka.cluster.assert", "off").toLowerCase match { + System.getProperty("pekko.cluster.assert", "off").toLowerCase match { case "on" | "true" => true case _ => false } @@ -82,7 +82,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { UniqueAddress(c.transport.defaultAddress, AddressUidExtension(system).longAddressUid) case other => throw new ConfigurationException( - s"ActorSystem [${system}] needs to have 'akka.actor.provider' set to 'cluster' in the configuration, currently uses [${other.getClass.getName}]") + s"ActorSystem [${system}] needs to have 'pekko.actor.provider' set to 'cluster' in the configuration, currently uses [${other.getClass.getName}]") } /** @@ -155,12 +155,12 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { if (system.scheduler.maxFrequency < 1.second / SchedulerTickDuration) { logInfo( "Using a dedicated scheduler for cluster. 
Default scheduler can be used if configured " + - "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].", + "with 'pekko.scheduler.tick-duration' [{} ms] <= 'pekko.cluster.scheduler.tick-duration' [{} ms].", (1000 / system.scheduler.maxFrequency).toInt, SchedulerTickDuration.toMillis) val cfg = ConfigFactory - .parseString(s"akka.scheduler.tick-duration=${SchedulerTickDuration.toMillis}ms") + .parseString(s"pekko.scheduler.tick-duration=${SchedulerTickDuration.toMillis}ms") .withFallback(system.settings.config) val threadFactory = system.threadFactory match { case tf: MonitorableThreadFactory => tf.withName(tf.name + "-cluster-scheduler") @@ -208,7 +208,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { Await.result((clusterDaemons ? InternalClusterAction.GetClusterCoreRef).mapTo[ActorRef], timeout.duration) } catch { case NonFatal(e) => - log.error(e, "Failed to startup Cluster. You can try to increase 'akka.actor.creation-timeout'.") + log.error(e, "Failed to startup Cluster. You can try to increase 'pekko.actor.creation-timeout'.") shutdown() // don't re-throw, that would cause the extension to be re-recreated // from shutdown() or other places, which may result in @@ -389,7 +389,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { /** * The supplied thunk will be run, once, when current cluster member is `Up`. - * Typically used together with configuration option `akka.cluster.min-nr-of-members` + * Typically used together with configuration option `pekko.cluster.min-nr-of-members` * to defer some action, such as starting actors, until the cluster has reached * a certain size. */ @@ -398,7 +398,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { /** * Java API: The supplied callback will be run, once, when current cluster member is `Up`. 
- * Typically used together with configuration option `akka.cluster.min-nr-of-members` + * Typically used together with configuration option `pekko.cluster.min-nr-of-members` * to defer some action, such as starting actors, until the cluster has reached * a certain size. */ diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterDaemon.scala index 71c9cf0118..fb73070378 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterDaemon.scala @@ -453,7 +453,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } private def isClusterBootstrapAvailable: Boolean = - context.system.settings.config.hasPath("akka.management.cluster.bootstrap") + context.system.settings.config.hasPath("pekko.management.cluster.bootstrap") override def postStop(): Unit = { context.system.eventStream.unsubscribe(self) @@ -594,7 +594,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh def initJoin(joiningNodeConfig: Config): Unit = { val joiningNodeVersion = - if (joiningNodeConfig.hasPath("akka.version")) joiningNodeConfig.getString("akka.version") + if (joiningNodeConfig.hasPath("pekko.version")) joiningNodeConfig.getString("pekko.version") else "unknown" // When joiningNodeConfig is empty the joining node has version 2.5.9 or earlier. 
val configCheckUnsupportedByJoiningNode = joiningNodeConfig.isEmpty diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterJmx.scala index 38f20adb37..098452a2c6 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterJmx.scala @@ -225,7 +225,7 @@ private[pekko] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { } else { log.warning( s"Could not register Cluster JMX MBean with name=$clusterMBeanName as it is already registered. " + - "If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") + "If you are running multiple clusters in the same JVM, set 'pekko.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") } } } @@ -244,7 +244,7 @@ private[pekko] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { } else { log.warning( s"Could not unregister Cluster JMX MBean with name=$clusterMBeanName as it was not found. 
" + - "If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") + "If you are running multiple clusters in the same JVM, set 'pekko.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") } } } diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterSettings.scala index f632955dea..af7215f076 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/ClusterSettings.scala @@ -37,7 +37,7 @@ object ClusterSettings { final class ClusterSettings(val config: Config, val systemName: String) { import ClusterSettings._ - private val cc = config.getConfig("akka.cluster") + private val cc = config.getConfig("pekko.cluster") val LogInfoVerbose: Boolean = cc.getBoolean("log-info-verbose") val LogInfo: Boolean = LogInfoVerbose || cc.getBoolean("log-info") diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/DowningProvider.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/DowningProvider.scala index e38e82ec6c..6152c79cb7 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/DowningProvider.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/DowningProvider.scala @@ -36,7 +36,7 @@ private[cluster] object DowningProvider { * API for plugins that will handle downing of cluster nodes. Concrete plugins must subclass and * have a public one argument constructor accepting an [[pekko.actor.ActorSystem]]. * - * A custom `DowningProvider` can be configured with `akka.cluster.downing-provider-class` + * A custom `DowningProvider` can be configured with `pekko.cluster.downing-provider-class` * * When implementing a downing provider you should make sure that it will not split the cluster into * several separate clusters in case of network problems or system overload (long GC pauses). 
This diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatCheckCluster.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatCheckCluster.scala index c66712b3c1..d5dc648775 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatCheckCluster.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatCheckCluster.scala @@ -16,11 +16,11 @@ import pekko.cluster.sbr.SplitBrainResolverProvider * INTERNAL API */ @InternalApi private[pekko] object JoinConfigCompatCheckCluster { - private val DowningProviderPath = "akka.cluster.downing-provider-class" - private val SbrStrategyPath = "akka.cluster.split-brain-resolver.active-strategy" + private val DowningProviderPath = "pekko.cluster.downing-provider-class" + private val SbrStrategyPath = "pekko.cluster.split-brain-resolver.active-strategy" private val AkkaSbrProviderClass = classOf[SplitBrainResolverProvider].getName - private val LightbendSbrProviderClass = "com.lightbend.akka.sbr.SplitBrainResolverProvider" + private val LightbendSbrProviderClass = "com.lightbend.pekko.sbr.SplitBrainResolverProvider" } /** diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala index 9fb6ae0cdf..fa907f769d 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala @@ -110,7 +110,7 @@ object JoinConfigCompatChecker { /** * INTERNAL API - * Removes sensitive keys, as defined in 'akka.cluster.configuration-compatibility-check.sensitive-config-paths', + * Removes sensitive keys, as defined in 'pekko.cluster.configuration-compatibility-check.sensitive-config-paths', * from the passed `requiredKeys` Seq. 
*/ @InternalApi @@ -125,7 +125,7 @@ object JoinConfigCompatChecker { /** * INTERNAL API * Builds a Seq of keys using the passed `Config` not including any sensitive keys, - * as defined in 'akka.cluster.configuration-compatibility-check.sensitive-config-paths'. + * as defined in 'pekko.cluster.configuration-compatibility-check.sensitive-config-paths'. */ @InternalApi private[cluster] def removeSensitiveKeys(config: Config, clusterSettings: ClusterSettings): im.Seq[String] = { @@ -152,8 +152,8 @@ object JoinConfigCompatChecker { // composite checker new JoinConfigCompatChecker { override val requiredKeys: im.Seq[String] = { - // Always include akka.version (used in join logging) - "akka.version" +: checkers.flatMap(_.requiredKeys).to(im.Seq) + // Always include pekko.version (used in join logging) + "pekko.version" +: checkers.flatMap(_.requiredKeys).to(im.Seq) } override def check(toValidate: Config, clusterConfig: Config): ConfigValidation = checkers.foldLeft(Valid: ConfigValidation) { (acc, checker) => diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/SeedNodeProcess.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/SeedNodeProcess.scala index 969b87a35a..e15c15638c 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/SeedNodeProcess.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/SeedNodeProcess.scala @@ -34,7 +34,7 @@ private[cluster] abstract class SeedNodeProcess(joinConfigCompatChecker: JoinCon private val NodeShutdownWarning = "It's recommended to perform a full cluster shutdown in order to deploy this new version. " + "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + - "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + + "'pekko.cluster.configuration-compatibility-check.enforce-on-join = off'. " + "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. 
" + "This node will be shutdown!" diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverProvider.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverProvider.scala index d9c630ad95..401ac94774 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverProvider.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverProvider.scala @@ -19,7 +19,7 @@ import pekko.coordination.lease.scaladsl.LeaseProvider * * Enabled with configuration: * {{{ - * akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + * pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" * }}} */ final class SplitBrainResolverProvider(system: ActorSystem) extends DowningProvider { diff --git a/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSettings.scala b/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSettings.scala index 787337c535..667da16750 100644 --- a/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSettings.scala +++ b/akka-cluster/src/main/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSettings.scala @@ -38,7 +38,7 @@ import pekko.util.Helpers.Requiring import SplitBrainResolverSettings._ - private val cc = config.getConfig("akka.cluster.split-brain-resolver") + private val cc = config.getConfig("pekko.cluster.split-brain-resolver") val DowningStableAfter: FiniteDuration = { val key = "stable-after" @@ -76,7 +76,7 @@ import pekko.util.Helpers.Requiring val c = strategyConfig(StaticQuorumName) val size = c .getInt("quorum-size") - .requiring(_ >= 1, s"akka.cluster.split-brain-resolver.$StaticQuorumName.quorum-size must be >= 1") + .requiring(_ >= 1, s"pekko.cluster.split-brain-resolver.$StaticQuorumName.quorum-size must be >= 1") StaticQuorumSettings(size, role(c)) } @@ -92,7 +92,7 @@ import 
pekko.util.Helpers.Requiring val leaseImplementation = c.getString("lease-implementation") require( leaseImplementation != "", - s"akka.cluster.split-brain-resolver.$LeaseMajorityName.lease-implementation must be defined") + s"pekko.cluster.split-brain-resolver.$LeaseMajorityName.lease-implementation must be defined") val acquireLeaseDelayForMinority = FiniteDuration(c.getDuration("acquire-lease-delay-for-minority").toMillis, TimeUnit.MILLISECONDS) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterAccrualFailureDetectorSpec.scala index ef5c388598..1d21d3128e 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterAccrualFailureDetectorSpec.scala @@ -20,7 +20,7 @@ object ClusterAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold = 4")) + .withFallback(ConfigFactory.parseString("pekko.cluster.failure-detector.threshold = 4")) .withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterDeathWatchSpec.scala index e39d6a44e2..4ffe25213b 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterDeathWatchSpec.scala @@ -33,8 +33,8 @@ object ClusterDeathWatchMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - 
akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterWatcherNoClusterWatcheeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterWatcherNoClusterWatcheeSpec.scala index f7a9bc4241..fc61fe90e4 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterWatcherNoClusterWatcheeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ClusterWatcherNoClusterWatcheeSpec.scala @@ -30,20 +30,20 @@ class ClusterWatcherNoClusterWatcheeConfig(val useUnsafe: Boolean, artery: Boole val remoting = role("remoting") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe - akka.remote.log-remote-lifecycle-events = off - akka.remote.artery.enabled = $artery - akka.log-dead-letters = off - akka.loggers =["org.apache.pekko.testkit.TestEventListener"] - akka.actor.allow-java-serialization = on + pekko.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.artery.enabled = $artery + pekko.log-dead-letters = off + pekko.loggers =["org.apache.pekko.testkit.TestEventListener"] + pekko.actor.allow-java-serialization = on """))) nodeConfig(remoting)(ConfigFactory.parseString(s""" - akka.actor.provider = remote""")) + pekko.actor.provider = remote""")) nodeConfig(clustered)(ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.cluster.jmx.enabled = off""")) + pekko.actor.provider = cluster + pekko.cluster.jmx.enabled = off""")) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ConvergenceSpec.scala 
index b8aa4a6489..f5602774ef 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/ConvergenceSpec.scala @@ -22,8 +22,8 @@ final case class ConvergenceMultiNodeConfig(failureDetectorPuppet: Boolean) exte commonConfig( debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster.failure-detector.threshold = 4 - akka.cluster.allow-weakly-up-members = off + pekko.cluster.failure-detector.threshold = 4 + pekko.cluster.allow-weakly-up-members = off """)).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DeterministicOldestWhenJoiningSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DeterministicOldestWhenJoiningSpec.scala index 33e8d324d7..02b7b2d535 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DeterministicOldestWhenJoiningSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DeterministicOldestWhenJoiningSpec.scala @@ -25,8 +25,8 @@ object DeterministicOldestWhenJoiningMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" # not too quick to trigger problematic scenario more often - akka.cluster.leader-actions-interval = 2000 ms - akka.cluster.gossip-interval = 500 ms + pekko.cluster.leader-actions-interval = 2000 ms + pekko.cluster.gossip-interval = 500 ms """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala index e301cc9873..4d4941bca0 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala @@ -26,16 +26,16 @@ object DowningWhenOtherHasQuarantinedThisActorSystemSpec extends MultiNodeConfig .withFallback(MultiNodeClusterSpec.clusterConfig) .withFallback( ConfigFactory.parseString(""" - akka.remote.artery.enabled = on - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.remote.artery.enabled = on + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" # speed up decision - akka.cluster.split-brain-resolver.stable-after = 5s + pekko.cluster.split-brain-resolver.stable-after = 5s """))) // exaggerate the timing issue by ,making the second node decide slower // this is to more consistently repeat the scenario where the other side completes downing // while the isolated part still has not made a decision and then see quarantined connections from the other nodes - nodeConfig(second)(ConfigFactory.parseString("akka.cluster.split-brain-resolver.stable-after = 15s")) + nodeConfig(second)(ConfigFactory.parseString("pekko.cluster.split-brain-resolver.stable-after = 15s")) testTransport(on = true) } @@ -53,7 +53,7 @@ abstract class DowningWhenOtherHasQuarantinedThisActorSystemSpec "Cluster node downed by other" must { - if (!ArterySettings(system.settings.config.getConfig("akka.remote.artery")).Enabled) { + if (!ArterySettings(system.settings.config.getConfig("pekko.remote.artery")).Enabled) { // this feature only works in Artery, because classic remoting will not accept connections from // a quarantined node, and that is too high risk of introducing regressions if changing that pending diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialHeartbeatSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialHeartbeatSpec.scala index da1b804798..7209b703fa 100644 --- 
a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialHeartbeatSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialHeartbeatSpec.scala @@ -22,7 +22,7 @@ object InitialHeartbeatMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster.failure-detector.threshold = 4""")).withFallback(MultiNodeClusterSpec.clusterConfig)) + pekko.cluster.failure-detector.threshold = 4""")).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialMembersOfNewDcSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialMembersOfNewDcSpec.scala index 56937b5533..5a6e3fe260 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialMembersOfNewDcSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/InitialMembersOfNewDcSpec.scala @@ -14,19 +14,19 @@ import pekko.testkit.ImplicitSender object InitialMembersOfNewDcSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(s""" - akka.actor.provider = cluster - akka.actor.warn-about-java-serializer-usage = off - akka.cluster { + pekko.actor.provider = cluster + pekko.actor.warn-about-java-serializer-usage = off + pekko.cluster { jmx.enabled = off debug.verbose-gossip-logging = on } - akka.cluster.multi-data-center { + pekko.cluster.multi-data-center { #cross-data-center-gossip-probability = 0.5 } - akka.loglevel = INFO - akka.log-dead-letters = off - akka.log-dead-letters-during-shutdown = off - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.loglevel = INFO + pekko.log-dead-letters = off + pekko.log-dead-letters-during-shutdown = off + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """)) val one = role("one") @@ -37,11 +37,11 @@ object InitialMembersOfNewDcSpec extends MultiNodeConfig { val five = role("five") 
nodeConfig(one, two, three) { - ConfigFactory.parseString("akka.cluster.multi-data-center.self-data-center = DC1") + ConfigFactory.parseString("pekko.cluster.multi-data-center.self-data-center = DC1") } nodeConfig(four, five) { - ConfigFactory.parseString("akka.cluster.multi-data-center.self-data-center = DC2") + ConfigFactory.parseString("pekko.cluster.multi-data-center.self-data-center = DC2") } } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinInProgressSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinInProgressSpec.scala index 828af8eb7f..25a44dbb19 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinInProgressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinInProgressSpec.scala @@ -18,7 +18,7 @@ object JoinInProgressMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # simulate delay in gossip by turning it off gossip-interval = 300 s failure-detector { diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinSeedNodeSpec.scala index 5336a79d58..3d60d87f7b 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/JoinSeedNodeSpec.scala @@ -23,10 +23,10 @@ object JoinSeedNodeMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString("""akka.cluster.app-version="1.0"""")) + .withFallback(ConfigFactory.parseString("""pekko.cluster.app-version="1.0"""")) .withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(ordinary1, ordinary2)(ConfigFactory.parseString("""akka.cluster.app-version="2.0"""")) + nodeConfig(ordinary1, ordinary2)(ConfigFactory.parseString("""pekko.cluster.app-version="2.0"""")) } 
class JoinSeedNodeMultiJvmNode1 extends JoinSeedNodeSpec diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LargeMessageClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LargeMessageClusterSpec.scala index 213a20eea2..61974e84cf 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LargeMessageClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LargeMessageClusterSpec.scala @@ -30,7 +30,7 @@ object LargeMessageClusterMultiJvmSpec extends MultiNodeConfig { // Note that this test uses default configuration, // not MultiNodeClusterSpec.clusterConfig commonConfig(ConfigFactory.parseString(s""" - akka { + pekko { cluster.debug.verbose-heartbeat-logging = on loggers = ["org.apache.pekko.testkit.TestEventListener"] @@ -160,7 +160,7 @@ abstract class LargeMessageClusterSpec // for non Aeron transport we use the Slow message and SlowSerializer to slow down // to not completely overload the machine/network, see issue #24576 - val arterySettings = ArterySettings(system.settings.config.getConfig("akka.remote.artery")) + val arterySettings = ArterySettings(system.settings.config.getConfig("pekko.remote.artery")) val aeronUdpEnabled = arterySettings.Enabled && arterySettings.Transport == ArterySettings.AeronUpd runOn(second) { diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningAllOtherNodesSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningAllOtherNodesSpec.scala index faa5db042e..ef7acc043f 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningAllOtherNodesSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningAllOtherNodesSpec.scala @@ -23,9 +23,9 @@ object LeaderDowningAllOtherNodesMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - 
akka.cluster.failure-detector.monitored-by-nr-of-members = 2 - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 1s + pekko.cluster.failure-detector.monitored-by-nr-of-members = 2 + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 1s """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 54911a5163..11de4a23c2 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -23,8 +23,8 @@ final case class LeaderDowningNodeThatIsUnreachableMultiNodeConfig(failureDetect commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 2s""")) + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 2s""")) .withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderLeavingSpec.scala index 66d1142ce7..a4b49529f2 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/LeaderLeavingSpec.scala @@ -24,8 +24,8 @@ object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) 
.withFallback(ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s""")) + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s""")) .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MBeanSpec.scala index fad27ab363..132cb07dd9 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MBeanSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MBeanSpec.scala @@ -24,9 +24,9 @@ object MBeanMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster.jmx.enabled = on - akka.cluster.roles = [testNode] - akka.cluster.app-version = "1.2.3" + pekko.cluster.jmx.enabled = on + pekko.cluster.roles = [testNode] + pekko.cluster.app-version = "1.2.3" """)).withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MemberWeaklyUpSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MemberWeaklyUpSpec.scala index 21ccf490b1..5b52998fda 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MemberWeaklyUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MemberWeaklyUpSpec.scala @@ -23,8 +23,8 @@ object MemberWeaklyUpSpec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.remote.retry-gate-closed-for = 3 s - akka.cluster.allow-weakly-up-members = 3 s + pekko.remote.retry-gate-closed-for = 3 s + pekko.cluster.allow-weakly-up-members = 3 s 
""")).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MinMembersBeforeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MinMembersBeforeUpSpec.scala index 62bf2960f7..8dcb5616d8 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MinMembersBeforeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MinMembersBeforeUpSpec.scala @@ -21,7 +21,7 @@ object MinMembersBeforeUpMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString("akka.cluster.min-nr-of-members = 3")) + .withFallback(ConfigFactory.parseString("pekko.cluster.min-nr-of-members = 3")) .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } @@ -33,8 +33,8 @@ object MinMembersBeforeUpWithWeaklyUpMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.min-nr-of-members = 3 - akka.cluster.allow-weakly-up-members = 3 s""")) + pekko.cluster.min-nr-of-members = 3 + pekko.cluster.allow-weakly-up-members = 3 s""")) .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } @@ -45,12 +45,12 @@ object MinMembersOfRoleBeforeUpMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString("akka.cluster.role.backend.min-nr-of-members = 2")) + .withFallback(ConfigFactory.parseString("pekko.cluster.role.backend.min-nr-of-members = 2")) .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) - nodeConfig(first)(ConfigFactory.parseString("akka.cluster.roles =[frontend]")) + nodeConfig(first)(ConfigFactory.parseString("pekko.cluster.roles =[frontend]")) - nodeConfig(second, third)(ConfigFactory.parseString("akka.cluster.roles =[backend]")) + nodeConfig(second, 
third)(ConfigFactory.parseString("pekko.cluster.roles =[backend]")) } class MinMembersBeforeUpMultiJvmNode1 extends MinMembersBeforeUpSpec diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcClusterSpec.scala index 9a8410799d..9fedb1423e 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcClusterSpec.scala @@ -21,15 +21,15 @@ class MultiDcSpecConfig(crossDcConnections: Int = 5) extends MultiNodeConfig { val fifth = role("fifth") commonConfig(ConfigFactory.parseString(s""" - akka.cluster.multi-data-center.cross-data-center-connections = $crossDcConnections + pekko.cluster.multi-data-center.cross-data-center-connections = $crossDcConnections """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc1" + pekko.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(third, fourth, fifth)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc2" + pekko.cluster.multi-data-center.self-data-center = "dc2" """)) testTransport(on = true) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcHeartbeatTakingOverSpec.scala index c1e446cb1d..63bfc2f146 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -29,19 +29,19 @@ object MultiDcHeartbeatTakingOverSpecMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") // beta nodeConfig(first, second, third)(ConfigFactory.parseString(""" - akka { + pekko { 
cluster.multi-data-center.self-data-center = alpha } """)) nodeConfig(fourth, fifth)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = beta } """)) commonConfig(ConfigFactory.parseString(""" - akka { + pekko { actor.provider = cluster loggers = ["org.apache.pekko.testkit.TestEventListener"] diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoin2Spec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoin2Spec.scala index 0ce37fe7d1..c4b1f89d1a 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoin2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoin2Spec.scala @@ -24,19 +24,19 @@ object MultiDcJoin2MultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") nodeConfig(first, second, third)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = alpha } """)) nodeConfig(fourth, fifth)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = beta } """)) commonConfig(ConfigFactory.parseString(""" - akka { + pekko { actor.provider = cluster loggers = ["org.apache.pekko.testkit.TestEventListener"] diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoinSpec.scala index 414fc61c95..894b563e83 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcJoinSpec.scala @@ -23,19 +23,19 @@ object MultiDcJoinMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") nodeConfig(first, second, third)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = alpha } """)) nodeConfig(fourth, fifth)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = beta } 
""")) commonConfig(ConfigFactory.parseString(""" - akka { + pekko { actor.provider = cluster loggers = ["org.apache.pekko.testkit.TestEventListener"] diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcLastNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcLastNodeSpec.scala index e310db167b..42f6f9d39c 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcLastNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcLastNodeSpec.scala @@ -16,15 +16,15 @@ object MultiDcLastNodeSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc1" + pekko.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(third)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc2" + pekko.cluster.multi-data-center.self-data-center = "dc2" """)) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSplitBrainSpec.scala index ed039e6f53..762f966d24 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSplitBrainSpec.scala @@ -27,18 +27,18 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { commonConfig( ConfigFactory .parseString(""" - akka.loglevel = DEBUG # issue #24955 - akka.cluster.debug.verbose-heartbeat-logging = on - akka.cluster.debug.verbose-gossip-logging = on - akka.remote.classic.netty.tcp.connection-timeout = 5 s # speedup in case of connection issue - akka.remote.retry-gate-closed-for = 1 s - akka.cluster.multi-data-center { + 
pekko.loglevel = DEBUG # issue #24955 + pekko.cluster.debug.verbose-heartbeat-logging = on + pekko.cluster.debug.verbose-gossip-logging = on + pekko.remote.classic.netty.tcp.connection-timeout = 5 s # speedup in case of connection issue + pekko.remote.retry-gate-closed-for = 1 s + pekko.cluster.multi-data-center { failure-detector { acceptable-heartbeat-pause = 4s heartbeat-interval = 1s } } - akka.cluster { + pekko.cluster { gossip-interval = 500ms leader-actions-interval = 1s downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning @@ -48,11 +48,11 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc1" + pekko.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(third, fourth, fifth)(ConfigFactory.parseString(""" - akka.cluster.multi-data-center.self-data-center = "dc2" + pekko.cluster.multi-data-center.self-data-center = "dc2" """)) testTransport(on = true) @@ -258,9 +258,9 @@ abstract class MultiDcSplitBrainSpec extends MultiNodeClusterSpec(MultiDcSplitBr val restartedSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = $port - akka.remote.artery.canonical.port = $port - akka.coordinated-shutdown.terminate-actor-system = on + pekko.remote.classic.netty.tcp.port = $port + pekko.remote.artery.canonical.port = $port + pekko.coordinated-shutdown.terminate-actor-system = on """).withFallback(system.settings.config)) Cluster(restartedSystem).join(thirdAddress) Await.ready(restartedSystem.whenTerminated, remaining) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSunnyWeatherSpec.scala index 9f55a85b9f..48849a34dd 100644 --- 
a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiDcSunnyWeatherSpec.scala @@ -25,19 +25,19 @@ object MultiDcSunnyWeatherMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") nodeConfig(first, second, third)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = alpha } """)) nodeConfig(fourth, fifth)(ConfigFactory.parseString(""" - akka { + pekko { cluster.multi-data-center.self-data-center = beta } """)) commonConfig(ConfigFactory.parseString(""" - akka { + pekko { actor.provider = cluster loggers = ["org.apache.pekko.testkit.TestEventListener"] diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiNodeClusterSpec.scala index 7bcb0f1016..a0dfcf4f82 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/MultiNodeClusterSpec.scala @@ -34,16 +34,16 @@ object MultiNodeClusterSpec { def clusterConfigWithFailureDetectorPuppet: Config = ConfigFactory .parseString( - "akka.cluster.failure-detector.implementation-class = org.apache.pekko.cluster.FailureDetectorPuppet") + "pekko.cluster.failure-detector.implementation-class = org.apache.pekko.cluster.FailureDetectorPuppet") .withFallback(clusterConfig) def clusterConfig(failureDetectorPuppet: Boolean): Config = if (failureDetectorPuppet) clusterConfigWithFailureDetectorPuppet else clusterConfig def clusterConfig: Config = ConfigFactory.parseString(s""" - akka.actor.provider = cluster - akka.actor.warn-about-java-serializer-usage = off - akka.cluster { + pekko.actor.provider = cluster + pekko.actor.warn-about-java-serializer-usage = off + pekko.cluster { jmx.enabled = off gossip-interval = 200 ms leader-actions-interval = 200 ms @@ -58,14 +58,14 @@ object 
MultiNodeClusterSpec { waiting-for-state-timeout = 200ms } } - akka.loglevel = INFO - akka.log-dead-letters = off - akka.log-dead-letters-during-shutdown = off - akka.remote { + pekko.loglevel = INFO + pekko.log-dead-letters = off + pekko.log-dead-letters-during-shutdown = off + pekko.remote { log-remote-lifecycle-events = off } - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] - akka.test { + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.test { single-expect-default = 5 s } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeChurnSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeChurnSpec.scala index 4154af459b..8e6e37858c 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeChurnSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeChurnSpec.scala @@ -25,12 +25,12 @@ object NodeChurnMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 1s - akka.cluster.prune-gossip-tombstones-after = 1s - akka.remote.classic.log-frame-size-exceeding = 1200b - akka.remote.artery.log-frame-size-exceeding = 1200b - akka.remote.artery.advanced.aeron { + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 1s + pekko.cluster.prune-gossip-tombstones-after = 1s + pekko.remote.classic.log-frame-size-exceeding = 1200b + pekko.remote.artery.log-frame-size-exceeding = 1200b + pekko.remote.artery.advanced.aeron { idle-cpu-level = 1 embedded-media-driver = off aeron-dir = "target/aeron-NodeChurnSpec" diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeDowningAndBeingRemovedSpec.scala 
b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeDowningAndBeingRemovedSpec.scala index be52750526..d95ae49337 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeDowningAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/NodeDowningAndBeingRemovedSpec.scala @@ -20,7 +20,7 @@ object NodeDowningAndBeingRemovedMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false).withFallback( ConfigFactory - .parseString("akka.cluster.testkit.auto-down-unreachable-after = off") + .parseString("pekko.cluster.testkit.auto-down-unreachable-after = off") .withFallback(MultiNodeClusterSpec.clusterConfig))) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/QuickRestartSpec.scala index 778be83912..6774c621a7 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/QuickRestartSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/QuickRestartSpec.scala @@ -25,8 +25,8 @@ object QuickRestartMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.testkit.auto-down-unreachable-after = off - akka.cluster.allow-weakly-up-members = off + pekko.cluster.testkit.auto-down-unreachable-after = off + pekko.cluster.allow-weakly-up-members = off """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) @@ -64,15 +64,15 @@ abstract class QuickRestartSpec extends MultiNodeClusterSpec(QuickRestartMultiJv ActorSystem( system.name, MultiNodeSpec.configureNextPortIfFixed( - ConfigFactory.parseString(s"akka.cluster.roles = [round-$n]").withFallback(system.settings.config))) + ConfigFactory.parseString(s"pekko.cluster.roles = [round-$n]").withFallback(system.settings.config))) } else { ActorSystem( system.name, // use the same port ConfigFactory.parseString(s""" - akka.cluster.roles = [round-$n] - 
akka.remote.classic.netty.tcp.port = ${Cluster(restartingSystem).selfAddress.port.get} - akka.remote.artery.canonical.port = ${Cluster(restartingSystem).selfAddress.port.get} + pekko.cluster.roles = [round-$n] + pekko.remote.classic.netty.tcp.port = ${Cluster(restartingSystem).selfAddress.port.get} + pekko.remote.artery.canonical.port = ${Cluster(restartingSystem).selfAddress.port.get} """).withFallback(system.settings.config)) } log.info("Restarting node has address: {}", Cluster(restartingSystem).selfUniqueAddress) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RemoteFeaturesWithClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RemoteFeaturesWithClusterSpec.scala index 149bf32c6b..f6b512d2a9 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RemoteFeaturesWithClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RemoteFeaturesWithClusterSpec.scala @@ -29,10 +29,10 @@ class ClusterRemoteFeaturesConfig(artery: Boolean) extends MultiNodeConfig { private val baseConfig = { ConfigFactory.parseString(s""" - akka.remote.log-remote-lifecycle-events = off - akka.remote.artery.enabled = $artery - akka.remote.artery.canonical.port = ${MultiNodeSpec.selfPort} - akka.log-dead-letters-during-shutdown = off + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.artery.enabled = $artery + pekko.remote.artery.canonical.port = ${MultiNodeSpec.selfPort} + pekko.log-dead-letters-during-shutdown = off """).withFallback(MultiNodeClusterSpec.clusterConfig) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartFirstSeedNodeSpec.scala index f2c94800aa..64ac9a656c 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartFirstSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartFirstSeedNodeSpec.scala @@ -32,9 
+32,9 @@ object RestartFirstSeedNodeMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.testkit.auto-down-unreachable-after = off - akka.cluster.retry-unsuccessful-join-after = 3s - akka.cluster.allow-weakly-up-members = off + pekko.cluster.testkit.auto-down-unreachable-after = off + pekko.cluster.retry-unsuccessful-join-after = 3s + pekko.cluster.allow-weakly-up-members = off """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -62,8 +62,8 @@ abstract class RestartFirstSeedNodeSpec lazy val restartedSeed1System = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${seedNodes.head.port.get} - akka.remote.artery.canonical.port = ${seedNodes.head.port.get} + pekko.remote.classic.netty.tcp.port = ${seedNodes.head.port.get} + pekko.remote.artery.canonical.port = ${seedNodes.head.port.get} """).withFallback(system.settings.config)) override def afterAll(): Unit = { diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode2Spec.scala index fe9f1188ae..34da6223c4 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode2Spec.scala @@ -30,15 +30,15 @@ object RestartNode2SpecMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 2s - akka.cluster.retry-unsuccessful-join-after = 3s - akka.cluster.allow-weakly-up-members = off - akka.remote.retry-gate-closed-for = 45s - akka.remote.log-remote-lifecycle-events = INFO + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + 
pekko.cluster.testkit.auto-down-unreachable-after = 2s + pekko.cluster.retry-unsuccessful-join-after = 3s + pekko.cluster.allow-weakly-up-members = off + pekko.remote.retry-gate-closed-for = 45s + pekko.remote.log-remote-lifecycle-events = INFO # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) @@ -64,9 +64,9 @@ abstract class RestartNode2SpecSpec extends MultiNodeClusterSpec(RestartNode2Spe lazy val restartedSeed1System = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${seedNodes.head.port.get} - akka.remote.artery.canonical.port = ${seedNodes.head.port.get} - #akka.remote.retry-gate-closed-for = 1s + pekko.remote.classic.netty.tcp.port = ${seedNodes.head.port.get} + pekko.remote.artery.canonical.port = ${seedNodes.head.port.get} + #pekko.remote.retry-gate-closed-for = 1s """).withFallback(system.settings.config)) override def afterAll(): Unit = { diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode3Spec.scala index 0a3f9d58c5..dec312a755 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode3Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNode3Spec.scala @@ -32,11 +32,11 @@ object RestartNode3MultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.testkit.auto-down-unreachable-after = off - akka.cluster.allow-weakly-up-members = off + pekko.cluster.testkit.auto-down-unreachable-after = off + pekko.cluster.allow-weakly-up-members = off # test is using Java serialization and not priority to rewrite - 
akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) @@ -63,8 +63,8 @@ abstract class RestartNode3Spec extends MultiNodeClusterSpec(RestartNode3MultiJv lazy val restartedSecondSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} - akka.remote.classic.netty.tcp.port = ${secondUniqueAddress.address.port.get} + pekko.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} + pekko.remote.classic.netty.tcp.port = ${secondUniqueAddress.address.port.get} """).withFallback(system.settings.config)) override def afterAll(): Unit = { diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNodeSpec.scala index 9b1b53a749..e0da8a3e3d 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/RestartNodeSpec.scala @@ -36,13 +36,13 @@ object RestartNodeMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 5s - akka.cluster.allow-weakly-up-members = off - #akka.remote.use-passive-connections = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 5s + pekko.cluster.allow-weakly-up-members = off + #pekko.remote.use-passive-connections = off # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + 
pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) @@ -83,8 +83,8 @@ abstract class RestartNodeSpec extends MultiNodeClusterSpec(RestartNodeMultiJvmS lazy val restartedSecondSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${secondUniqueAddress.address.port.get} - akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} + pekko.remote.classic.netty.tcp.port = ${secondUniqueAddress.address.port.get} + pekko.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} """).withFallback(system.settings.config)) override def afterAll(): Unit = { diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SingletonClusterSpec.scala index b704febcdc..f15fa2c586 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SingletonClusterSpec.scala @@ -21,7 +21,7 @@ final case class SingletonClusterMultiNodeConfig(failureDetectorPuppet: Boolean) commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning testkit.auto-down-unreachable-after = 0s failure-detector.threshold = 4 diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainQuarantineSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainQuarantineSpec.scala index d03ab076a7..515dea7c5b 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainQuarantineSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainQuarantineSpec.scala @@ -28,12 +28,12 @@ object SplitBrainQuarantineSpec extends MultiNodeConfig { 
.withFallback(MultiNodeClusterSpec.clusterConfig) .withFallback(ConfigFactory.parseString( """ - akka.remote.artery.enabled = on - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.remote.artery.enabled = on + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" # we dont really want this to hit, but we need the sbr enabled to know the quarantining # downing does not trigger - akka.cluster.split-brain-resolver.stable-after = 5 minutes - akka.cluster.debug.verbose-gossip-logging = on + pekko.cluster.split-brain-resolver.stable-after = 5 minutes + pekko.cluster.debug.verbose-gossip-logging = on """))) } @@ -50,7 +50,7 @@ abstract class SplitBrainQuarantineSpec extends MultiNodeClusterSpec(SplitBrainQ "Cluster node downed by other" must { - if (!ArterySettings(system.settings.config.getConfig("akka.remote.artery")).Enabled) { + if (!ArterySettings(system.settings.config.getConfig("pekko.remote.artery")).Enabled) { // this feature only works in Artery, because classic remoting will not accept connections from // a quarantined node, and that is too high risk of introducing regressions if changing that pending diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainSpec.scala index 61e6cb0306..892b6fcc95 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SplitBrainSpec.scala @@ -24,8 +24,8 @@ final case class SplitBrainMultiNodeConfig(failureDetectorPuppet: Boolean) exten commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.remote.retry-gate-closed-for = 3 s - akka.cluster { + pekko.remote.retry-gate-closed-for = 3 s + pekko.cluster { downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning 
testkit.auto-down-unreachable-after = 1s failure-detector.threshold = 4 diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StreamRefSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StreamRefSpec.scala index f78231358b..9555e037c9 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StreamRefSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StreamRefSpec.scala @@ -38,8 +38,8 @@ object StreamRefSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.stream.materializer.stream-ref.subscription-timeout = 10 s - akka.cluster { + pekko.stream.materializer.stream-ref.subscription-timeout = 10 s + pekko.cluster { downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning testkit.auto-down-unreachable-after = 1s }""")) @@ -256,7 +256,7 @@ abstract class StreamRefSpec extends MultiNodeClusterSpec(StreamRefSpec) with Im // and it triggered the subscription timeout. Therefore we must wait more than the // the subscription timeout for a failure val timeout = system.settings.config - .getDuration("akka.stream.materializer.stream-ref.subscription-timeout") + .getDuration("pekko.stream.materializer.stream-ref.subscription-timeout") .asScala + 2.seconds streamLifecycle3.expectMsg(timeout, "failed-system-42-tmp") } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StressSpec.scala index 510ccb7616..01d13184d1 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/StressSpec.scala @@ -57,12 +57,12 @@ import pekko.util.Helpers.Requiring * * By default it uses 13 nodes. 
* Example of sbt command line parameters to double that: - * `-DMultiJvm.akka.cluster.Stress.nrOfNodes=26 -Dmultinode.Dakka.test.cluster-stress-spec.nr-of-nodes-factor=2` + * `-DMultiJvm.pekko.cluster.Stress.nrOfNodes=26 -Dmultinode.Dpekko.test.cluster-stress-spec.nr-of-nodes-factor=2` */ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val totalNumberOfNodes = - System.getProperty("MultiJvm.akka.cluster.Stress.nrOfNodes") match { + System.getProperty("MultiJvm.pekko.cluster.Stress.nrOfNodes") match { case null => 10 case value => value.toInt.requiring(_ >= 10, "nrOfNodes should be >= 10") } @@ -72,7 +72,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { // Note that this test uses default configuration, // not MultiNodeClusterSpec.clusterConfig commonConfig(ConfigFactory.parseString(""" - akka.test.cluster-stress-spec { + pekko.test.cluster-stress-spec { infolog = off # scale the nr-of-nodes* settings with this factor nr-of-nodes-factor = 1 @@ -100,8 +100,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { convergence-within-factor = 1.0 } - akka.actor.provider = cluster - akka.cluster { + pekko.actor.provider = cluster + pekko.cluster { failure-detector.acceptable-heartbeat-pause = 3s downing-provider-class = org.apache.pekko.cluster.sbr.SplitBrainResolverProvider split-brain-resolver { @@ -109,23 +109,23 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { } publish-stats-interval = 1s } - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] - akka.loglevel = INFO - akka.remote.log-remote-lifecycle-events = off - akka.actor.default-dispatcher.fork-join-executor { + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.loglevel = INFO + pekko.remote.log-remote-lifecycle-events = off + pekko.actor.default-dispatcher.fork-join-executor { parallelism-min = 8 parallelism-max = 8 } # test is using Java serialization and not priority to rewrite - 
akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) testTransport(on = true) class Settings(conf: Config) { - private val testConfig = conf.getConfig("akka.test.cluster-stress-spec") + private val testConfig = conf.getConfig("pekko.test.cluster-stress-spec") import testConfig._ val infolog = getBoolean("infolog") diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SunnyWeatherSpec.scala index 7c7adb92ab..0abc97c2fa 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SunnyWeatherSpec.scala @@ -26,7 +26,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { // Note that this test uses default configuration, // not MultiNodeClusterSpec.clusterConfig commonConfig(ConfigFactory.parseString(""" - akka { + pekko { actor.provider = cluster loggers = ["org.apache.pekko.testkit.TestEventListener"] loglevel = INFO diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SurviveNetworkInstabilitySpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SurviveNetworkInstabilitySpec.scala index 93f7392ee2..4c96348679 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SurviveNetworkInstabilitySpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/SurviveNetworkInstabilitySpec.scala @@ -38,9 +38,9 @@ object SurviveNetworkInstabilityMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.remote.classic.system-message-buffer-size=100 - akka.remote.artery.advanced.system-message-buffer-size=100 - akka.remote.classic.netty.tcp.connection-timeout = 10s + 
pekko.remote.classic.system-message-buffer-size=100 + pekko.remote.artery.advanced.system-message-buffer-size=100 + pekko.remote.classic.netty.tcp.connection-timeout = 10s """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/TransitionSpec.scala index 98e6ee880a..772eb6b9a4 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/TransitionSpec.scala @@ -24,8 +24,8 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback( ConfigFactory.parseString(""" - akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks - akka.cluster.publish-stats-interval = 0 s # always, when it happens + pekko.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks + pekko.cluster.publish-stats-interval = 0 s # always, when it happens """)) .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/UnreachableNodeJoinsAgainSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/UnreachableNodeJoinsAgainSpec.scala index 77e6b47b21..170f58b6e8 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/UnreachableNodeJoinsAgainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/UnreachableNodeJoinsAgainSpec.scala @@ -31,7 +31,7 @@ object UnreachableNodeJoinsAgainMultiNodeConfig extends MultiNodeConfig { val fourth = role("fourth") commonConfig(ConfigFactory.parseString(""" - akka.remote.log-remote-lifecycle-events = off + pekko.remote.log-remote-lifecycle-events = off """).withFallback(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig))) testTransport(on = true) @@ -163,13 +163,13 @@ abstract class 
UnreachableNodeJoinsAgainSpec extends MultiNodeClusterSpec(Unreac .parseString( if (RARP(system).provider.remoteSettings.Artery.Enabled) s""" - akka.remote.artery.canonical { + pekko.remote.artery.canonical { hostname = ${victimAddress.host.get} port = ${victimAddress.port.get} } """ else s""" - akka.remote.classic.netty.tcp { + pekko.remote.classic.netty.tcp { hostname = ${victimAddress.host.get} port = ${victimAddress.port.get} }""") diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterConsistentHashingRouterSpec.scala index c303897bb9..13e0fba6c7 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterConsistentHashingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterConsistentHashingRouterSpec.scala @@ -47,7 +47,7 @@ object ClusterConsistentHashingRouterMultiJvmSpec extends MultiNodeConfig { } } - akka.actor.deployment { + pekko.actor.deployment { /router1 = $${common-router-settings} /router3 = $${common-router-settings} /router4 = $${common-router-settings} diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterRoundRobinSpec.scala index 9b99218279..fc855e42ea 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterRoundRobinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/ClusterRoundRobinSpec.scala @@ -51,7 +51,7 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.actor { + pekko.actor { serialization-bindings { "org.apache.pekko.cluster.routing.ClusterRoundRobinMultiJvmSpec$$Reply" = java-test } @@ -92,8 +92,8 @@ object 
ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { } """)).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second)(ConfigFactory.parseString("""akka.cluster.roles =["a", "c"]""")) - nodeConfig(third)(ConfigFactory.parseString("""akka.cluster.roles =["b", "c"]""")) + nodeConfig(first, second)(ConfigFactory.parseString("""pekko.cluster.roles =["a", "c"]""")) + nodeConfig(third)(ConfigFactory.parseString("""pekko.cluster.roles =["b", "c"]""")) testTransport(on = true) diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/UseRoleIgnoredSpec.scala index c1228d95a8..19e3d6132e 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/UseRoleIgnoredSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/routing/UseRoleIgnoredSpec.scala @@ -49,8 +49,8 @@ object UseRoleIgnoredMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first)(ConfigFactory.parseString("""akka.cluster.roles =["a", "c"]""")) - nodeConfig(second, third)(ConfigFactory.parseString("""akka.cluster.roles =["b", "c"]""")) + nodeConfig(first)(ConfigFactory.parseString("""pekko.cluster.roles =["a", "c"]""")) + nodeConfig(second, third)(ConfigFactory.parseString("""pekko.cluster.roles =["b", "c"]""")) } diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllIndirectlyConnected5NodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllIndirectlyConnected5NodeSpec.scala index 9cb427824e..3b30467c7c 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllIndirectlyConnected5NodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllIndirectlyConnected5NodeSpec.scala @@ -23,7 +23,7 @@ object DownAllIndirectlyConnected5NodeSpec extends 
MultiNodeConfig { val node5 = role("node5") commonConfig(ConfigFactory.parseString(""" - akka { + pekko { loglevel = INFO cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllUnstable5NodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllUnstable5NodeSpec.scala index bd5f552452..3339c46322 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllUnstable5NodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/DownAllUnstable5NodeSpec.scala @@ -23,7 +23,7 @@ object DownAllUnstable5NodeSpec extends MultiNodeConfig { val node5 = role("node5") commonConfig(ConfigFactory.parseString(""" - akka { + pekko { loglevel = INFO cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected3NodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected3NodeSpec.scala index 077019695d..44927d4cce 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected3NodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected3NodeSpec.scala @@ -21,7 +21,7 @@ object IndirectlyConnected3NodeSpec extends MultiNodeConfig { val node3 = role("node3") commonConfig(ConfigFactory.parseString(""" - akka { + pekko { loglevel = INFO cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected5NodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected5NodeSpec.scala index 32cc3ddc25..548974acc7 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected5NodeSpec.scala 
+++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/IndirectlyConnected5NodeSpec.scala @@ -23,7 +23,7 @@ object IndirectlyConnected5NodeSpec extends MultiNodeConfig { val node5 = role("node5") commonConfig(ConfigFactory.parseString(""" - akka { + pekko { loglevel = INFO cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" diff --git a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/LeaseMajority5NodeSpec.scala b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/LeaseMajority5NodeSpec.scala index 33ff93deee..d7416517f5 100644 --- a/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/LeaseMajority5NodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/org/apache/pekko/cluster/sbr/LeaseMajority5NodeSpec.scala @@ -26,7 +26,7 @@ object LeaseMajority5NodeSpec extends MultiNodeConfig { val node5 = role("node5") commonConfig(ConfigFactory.parseString(s""" - akka { + pekko { loglevel = INFO cluster { gossip-interval = 200 ms diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterConfigSpec.scala index 49ea9dfc03..0c2605b888 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterConfigSpec.scala @@ -62,7 +62,7 @@ class ClusterConfigSpec extends AkkaSpec { "be able to parse non-default cluster config elements" in { val settings = new ClusterSettings( ConfigFactory.parseString(""" - |akka { + |pekko { | cluster { | roles = [ "hamlet" ] | multi-data-center.self-data-center = "blue" diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeathWatchNotificationSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeathWatchNotificationSpec.scala index d8dc845686..d4906ea200 100644 --- 
a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeathWatchNotificationSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeathWatchNotificationSpec.scala @@ -17,14 +17,14 @@ import pekko.remote.artery.ArterySpecSupport object ClusterDeathWatchNotificationSpec { val config = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = INFO actor { provider = cluster } } - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """).withFallback(ArterySpecSupport.defaultConfig) object Sender { diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeployerSpec.scala index dcb45b1664..e4520fd16b 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeployerSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDeployerSpec.scala @@ -18,8 +18,8 @@ import pekko.testkit._ object ClusterDeployerSpec { val deployerConf = ConfigFactory.parseString( """ - akka.actor.provider = "cluster" - akka.actor.deployment { + pekko.actor.provider = "cluster" + pekko.actor.deployment { /user/service1 { router = round-robin-pool cluster.enabled = on @@ -37,8 +37,8 @@ object ClusterDeployerSpec { cluster.allow-local-routees = off } } - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """, ConfigParseOptions.defaults) @@ -52,7 +52,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { "A RemoteDeployer" must { - "be able to parse 'akka.actor.deployment._' with specified cluster pool" in { + "be able to parse 'pekko.actor.deployment._' with specified cluster pool" in { val service = "/user/service1" val deployment = 
system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) deployment should not be None @@ -69,7 +69,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { Deploy.NoMailboxGiven))) } - "be able to parse 'akka.actor.deployment._' with specified cluster group" in { + "be able to parse 'pekko.actor.deployment._' with specified cluster group" in { val service = "/user/service2" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) deployment should not be None diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventPublisherSpec.scala index 4f22dd9eea..4c2d98efdc 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventPublisherSpec.scala @@ -24,9 +24,9 @@ import pekko.testkit.TestProbe object ClusterDomainEventPublisherSpec { val config = """ - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """ } diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventSpec.scala index 16c91972ab..05bfeec4b5 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterDomainEventSpec.scala @@ -36,13 +36,13 @@ class ClusterDomainEventSpec extends AnyWordSpec with Matchers with BeforeAndAft val eDown = TestMember(Address("akka", "sys", "e", 2552), Down, eRoles) val selfDummyAddress = UniqueAddress(Address("akka", "sys", "selfDummy", 2552), 17L) - 
private val originalClusterAssert = sys.props.get("akka.cluster.assert").getOrElse("false") + private val originalClusterAssert = sys.props.get("pekko.cluster.assert").getOrElse("false") override protected def beforeAll(): Unit = { - System.setProperty("akka.cluster.assert", "on") + System.setProperty("pekko.cluster.assert", "on") } override protected def afterAll(): Unit = { - System.setProperty("akka.cluster.assert", originalClusterAssert) + System.setProperty("pekko.cluster.assert", originalClusterAssert) } private[cluster] def converge(gossip: Gossip): (Gossip, Set[UniqueAddress]) = diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatReceiverSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatReceiverSpec.scala index ead5a11c66..f04cbe188f 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatReceiverSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatReceiverSpec.scala @@ -9,7 +9,7 @@ import pekko.cluster.ClusterHeartbeatSender.{ Heartbeat, HeartbeatRsp } import pekko.testkit.{ AkkaSpec, ImplicitSender } class ClusterHeartbeatReceiverSpec extends AkkaSpec(""" - akka.actor.provider = cluster + pekko.actor.provider = cluster """.stripMargin) with ImplicitSender { "ClusterHeartbeatReceiver" should { "respond to heartbeats with the same sequenceNr and sendTime" in { diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatSenderSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatSenderSpec.scala index 4e6da6b23d..fa7fa92e91 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatSenderSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterHeartbeatSenderSpec.scala @@ -25,9 +25,9 @@ object ClusterHeartbeatSenderSpec { } class ClusterHeartbeatSenderSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.actor.provider = cluster - 
akka.cluster.failure-detector.heartbeat-interval = 0.2s + pekko.loglevel = DEBUG + pekko.actor.provider = cluster + pekko.cluster.failure-detector.heartbeat-interval = 0.2s """.stripMargin) with ImplicitSender { "ClusterHeartBeatSender" must { diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterLogSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterLogSpec.scala index 5cd101fc43..eff2141e9d 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterLogSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterLogSpec.scala @@ -12,18 +12,18 @@ import pekko.testkit.{ AkkaSpec, EventFilter, ImplicitSender } object ClusterLogSpec { val config = """ - akka.cluster { + pekko.cluster { downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning testkit.auto-down-unreachable-after = 0s publish-stats-interval = 0 s # always, when it happens failure-detector.implementation-class = org.apache.pekko.cluster.FailureDetectorPuppet } - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.loglevel = "INFO" - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.loglevel = "INFO" + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """ } @@ -88,7 +88,7 @@ class ClusterLogVerboseDefaultSpec extends ClusterLogSpec(ConfigFactory.parseStr class ClusterLogVerboseEnabledSpec extends ClusterLogSpec( ConfigFactory - .parseString("akka.cluster.log-info-verbose = on") + .parseString("pekko.cluster.log-info-verbose = on") .withFallback(ConfigFactory.parseString(ClusterLogSpec.config))) { "A Cluster" must { diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterSpec.scala 
b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterSpec.scala index dfcd35fd43..2250bf3ea6 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterSpec.scala @@ -32,7 +32,7 @@ import pekko.util.Version object ClusterSpec { val config = """ - akka.cluster { + pekko.cluster { downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning testkit.auto-down-unreachable-after = 0s periodic-tasks-initial-delay = 120 seconds // turn off scheduled tasks @@ -40,10 +40,10 @@ object ClusterSpec { failure-detector.implementation-class = org.apache.pekko.cluster.FailureDetectorPuppet app-version = "1.2.3" } - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """ final case class GossipTo(address: Address) @@ -150,9 +150,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val sys2 = ActorSystem( "ClusterSpec2", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """)) try { val ref = sys2.actorOf(Props.empty) @@ -184,9 +184,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val sys2 = ActorSystem( "ClusterSpec2", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """)) try { val probe = TestProbe()(sys2) @@ -214,10 
+214,10 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val sys2 = ActorSystem( "ClusterSpec2", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.coordinated-shutdown.terminate-actor-system = on + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.coordinated-shutdown.terminate-actor-system = on """)) try { val probe = TestProbe()(sys2) @@ -252,10 +252,10 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val sys2 = ActorSystem( "ClusterSpec2", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.min-nr-of-members = 2 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.min-nr-of-members = 2 """)) try { val probe = TestProbe()(sys2) @@ -283,10 +283,10 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val sys2 = ActorSystem( "ClusterSpec2", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.coordinated-shutdown.terminate-actor-system = on + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.coordinated-shutdown.terminate-actor-system = on """)) try { val probe = TestProbe()(sys2) @@ -317,11 +317,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val sys3 = ActorSystem( "ClusterSpec3", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.coordinated-shutdown.terminate-actor-system = on - 
akka.cluster.run-coordinated-shutdown-when-down = on + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.coordinated-shutdown.terminate-actor-system = on + pekko.cluster.run-coordinated-shutdown-when-down = on """)) try { val probe = TestProbe()(sys3) @@ -341,11 +341,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } } - "register multiple cluster JMX MBeans with akka.cluster.jmx.multi-mbeans-in-same-jvm = on" in { + "register multiple cluster JMX MBeans with pekko.cluster.jmx.multi-mbeans-in-same-jvm = on" in { def getConfig = (port: Int) => ConfigFactory.parseString(s""" - akka.cluster.jmx.multi-mbeans-in-same-jvm = on - akka.remote.classic.netty.tcp.port = ${port} - akka.remote.artery.canonical.port = ${port} + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.remote.classic.netty.tcp.port = ${port} + pekko.remote.artery.canonical.port = ${port} """).withFallback(ConfigFactory.parseString(ClusterSpec.config)) val sys1 = ActorSystem("ClusterSpec4", getConfig(2552)) diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterTestKit.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterTestKit.scala index 50dbd72c0f..767bc71b93 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterTestKit.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ClusterTestKit.scala @@ -137,8 +137,8 @@ trait ClusterTestKit extends TestKitBase { actorSystems = actorSystems.filterNot(_ == actorSystem) val newConfig = ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = $port - akka.remote.artery.canonical.port = $port + pekko.remote.classic.netty.tcp.port = $port + pekko.remote.artery.canonical.port = $port """).withFallback(config) if (firstSeedNode) newActorSystemAsFirst(newConfig) @@ -164,7 +164,7 @@ abstract class RollingUpgradeClusterSpec(config: Config) extends AkkaSpec(config * Starts `size` * 
Note that the two versions of config are validated against each other and have to * be valid both ways: v1 => v2, v2 => v1. Uses a timeout of 20 seconds and - * defaults to `akka.cluster.configuration-compatibility-check.enforce-on-join = on`. + * defaults to `pekko.cluster.configuration-compatibility-check.enforce-on-join = on`. * * @param clusterSize the cluster size - number of nodes to create for the cluster * @param v1Config the version of config to base validation against @@ -189,7 +189,7 @@ abstract class RollingUpgradeClusterSpec(config: Config) extends AkkaSpec(config * @param timeout the duration to wait for each member to be [[MemberStatus.Up]] on re-join * @param awaitAll the duration to wait for all members to be [[MemberStatus.Up]] on initial join, * and for the one node not upgraded to register member size as `clusterSize` on upgrade - * @param enforced toggle `akka.cluster.configuration-compatibility-check.enforce-on-join` on or off + * @param enforced toggle `pekko.cluster.configuration-compatibility-check.enforce-on-join` on or off * @param shouldRejoin the condition being tested on attempted re-join: members up or terminated */ def upgradeCluster( @@ -230,6 +230,6 @@ abstract class RollingUpgradeClusterSpec(config: Config) extends AkkaSpec(config def unenforced(config: Config): Config = ConfigFactory - .parseString("""akka.cluster.configuration-compatibility-check.enforce-on-join = off""") + .parseString("""pekko.cluster.configuration-compatibility-check.enforce-on-join = off""") .withFallback(config) } diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/CrossDcHeartbeatSenderSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/CrossDcHeartbeatSenderSpec.scala index 039507b226..bb7bdcb6d1 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/CrossDcHeartbeatSenderSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/CrossDcHeartbeatSenderSpec.scala @@ -31,11 +31,11 @@ object 
CrossDcHeartbeatSenderSpec { } class CrossDcHeartbeatSenderSpec extends AkkaSpec(""" - akka.loglevel = DEBUG - akka.actor.provider = cluster + pekko.loglevel = DEBUG + pekko.actor.provider = cluster # should not be used here - akka.cluster.failure-detector.heartbeat-interval = 5s - akka.cluster.multi-data-center { + pekko.cluster.failure-detector.heartbeat-interval = 5s + pekko.cluster.multi-data-center { self-data-center = "dc1" failure-detector.heartbeat-interval = 0.2s } diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/DowningProviderSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/DowningProviderSpec.scala index 4b617ca16c..80195f29cd 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/DowningProviderSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/DowningProviderSpec.scala @@ -39,7 +39,7 @@ class DummyDowningProvider(@unused system: ActorSystem) extends DowningProvider class DowningProviderSpec extends AnyWordSpec with Matchers { val baseConf = ConfigFactory.parseString(""" - akka { + pekko { loglevel = WARNING actor.provider = "cluster" remote { @@ -67,7 +67,7 @@ class DowningProviderSpec extends AnyWordSpec with Matchers { val system = ActorSystem( "auto-downing", ConfigFactory.parseString(""" - akka.cluster.downing-provider-class="org.apache.pekko.cluster.DummyDowningProvider" + pekko.cluster.downing-provider-class="org.apache.pekko.cluster.DummyDowningProvider" """).withFallback(baseConf)) Cluster(system).downingProvider shouldBe a[DummyDowningProvider] @@ -85,7 +85,7 @@ class DowningProviderSpec extends AnyWordSpec with Matchers { ActorSystem( "auto-downing", ConfigFactory.parseString(""" - akka.cluster.downing-provider-class="org.apache.pekko.cluster.FailingDowningProvider" + pekko.cluster.downing-provider-class="org.apache.pekko.cluster.FailingDowningProvider" """).withFallback(baseConf))) } catch { case NonFatal(_) => diff --git 
a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckClusterSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckClusterSpec.scala index ac0d04ddbc..f050cfad95 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckClusterSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckClusterSpec.scala @@ -54,10 +54,10 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { "JoinConfigCompatCheckCluster" must { "be valid when no downing-provider" in { val oldConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "" + pekko.cluster.downing-provider-class = "" """).withFallback(system.settings.config) val newConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "" + pekko.cluster.downing-provider-class = "" """).withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig) should ===(Valid) } @@ -65,11 +65,11 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { "be valid when same downing-provider" in { val oldConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" """).withFallback(system.settings.config) val newConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" """).withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig) should ===(Valid) } @@ -78,12 +78,12 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { val oldConfig = ConfigFactory .parseString(""" - akka.cluster.downing-provider-class = "com.lightbend.akka.sbr.SplitBrainResolverProvider" + pekko.cluster.downing-provider-class = 
"com.lightbend.pekko.sbr.SplitBrainResolverProvider" """) .withFallback(system.settings.config) val newConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" """).withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig) should ===(Valid) } @@ -91,11 +91,11 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { "be invalid when different downing-provider" in { val oldConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.testkit.AutoDowning" + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.testkit.AutoDowning" """).withFallback(system.settings.config) val newConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" """).withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig).getClass should ===(classOf[Invalid]) } @@ -103,13 +103,13 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { "be invalid when different sbr strategy" in { val oldConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" - akka.cluster.split-brain-resolver.active-strategy = keep-majority + pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.cluster.split-brain-resolver.active-strategy = keep-majority """).withFallback(system.settings.config) val newConfig = ConfigFactory.parseString(""" - akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" - akka.cluster.split-brain-resolver.active-strategy = keep-oldest + pekko.cluster.downing-provider-class = 
"org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" + pekko.cluster.split-brain-resolver.active-strategy = keep-oldest """).withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig).getClass should ===(classOf[Invalid]) checkInitJoinAck(oldConfig, newConfig).getClass should ===(classOf[Invalid]) diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala index 5255d0f91d..002640164e 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala @@ -16,12 +16,12 @@ import pekko.testkit.LongRunningTest object JoinConfigCompatCheckerRollingUpdateSpec { val baseConfig = ConfigFactory.parseString(s""" - akka.log-dead-letters = off - akka.log-dead-letters-during-shutdown = off - akka.remote.log-remote-lifecycle-events = off - akka.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning - akka.cluster.testkit.auto-down-unreachable-after = 0s - akka.cluster { + pekko.log-dead-letters = off + pekko.log-dead-letters-during-shutdown = off + pekko.remote.log-remote-lifecycle-events = off + pekko.cluster.downing-provider-class = org.apache.pekko.cluster.testkit.AutoDowning + pekko.cluster.testkit.auto-down-unreachable-after = 0s + pekko.cluster { jmx.enabled = off gossip-interval = 200 ms leader-actions-interval = 200 ms @@ -34,8 +34,8 @@ object JoinConfigCompatCheckerRollingUpdateSpec { val v1Config: Config = baseConfig.withFallback(JoinConfigCompatCheckerSpec.configWithChecker) private val v2 = ConfigFactory.parseString(""" - akka.cluster.new-configuration = "v2" - akka.cluster.configuration-compatibility-check.checkers { + pekko.cluster.new-configuration = "v2" + pekko.cluster.configuration-compatibility-check.checkers { 
rolling-upgrade-test = "org.apache.pekko.cluster.JoinConfigCompatRollingUpdateChecker" } """) @@ -71,7 +71,7 @@ class JoinConfigCompatCheckerRollingUpdateSpec } class JoinConfigCompatRollingUpdateChecker extends JoinConfigCompatChecker { - override def requiredKeys: im.Seq[String] = im.Seq("akka.cluster.new-configuration") + override def requiredKeys: im.Seq[String] = im.Seq("pekko.cluster.new-configuration") override def check(toCheck: Config, actualConfig: Config): ConfigValidation = { if (toCheck.hasPath(requiredKeys.head)) JoinConfigCompatChecker.fullMatch(requiredKeys, toCheck, actualConfig) diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerSpec.scala index ccf8f9cab7..5aa00eeb9a 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatCheckerSpec.scala @@ -15,17 +15,17 @@ object JoinConfigCompatCheckerSpec { val baseConfig: Config = ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.coordinated-shutdown.terminate-actor-system = on - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.jmx.multi-mbeans-in-same-jvm = on - akka.remote.artery.advanced.aeron.idle-cpu-level = 3 + pekko.actor.provider = "cluster" + pekko.coordinated-shutdown.terminate-actor-system = on + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.remote.artery.advanced.aeron.idle-cpu-level = 3 """) val configWithChecker: Config = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { config-compat-test = "test" sensitive.properties { username = "abc" @@ -35,10 +35,10 @@ object JoinConfigCompatCheckerSpec { configuration-compatibility-check { enforce-on-join = on checkers { - akka-cluster-test = 
"org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" } sensitive-config-paths { - akka = [ "akka.cluster.sensitive.properties" ] + akka = [ "pekko.cluster.sensitive.properties" ] } } } @@ -70,14 +70,14 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // this config is NOT compatible with the cluster config val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is incompatible config-compat-test = "test2" configuration-compatibility-check { enforce-on-join = on checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" } } } @@ -110,16 +110,16 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is not available on cluster side - akka.cluster.config-compat-test-extra = on + pekko.cluster.config-compat-test-extra = on configuration-compatibility-check { enforce-on-join = on checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" - akka-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" } } } @@ -151,7 +151,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is required on cluster side # config-compat-test = "test" @@ -188,16 +188,16 @@ class 
JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // This test verifies that validation on joining side takes 'configuration-compatibility-check.enforce-on-join' in consideration val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is not available on cluster side - akka.cluster.config-compat-test-extra = on + pekko.cluster.config-compat-test-extra = on configuration-compatibility-check { enforce-on-join = off checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" - akka-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" } } } @@ -224,13 +224,13 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // but node will ignore the the config check and join anyway val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { configuration-compatibility-check { # not enforcing config compat check enforce-on-join = off checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" } } # this config is incompatible @@ -254,11 +254,11 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } /** This test verifies the built-in JoinConfigCompatCheckerAkkaCluster */ - "NOT be allowed to join a cluster using a different value for akka.cluster.downing-provider-class" taggedAs LongRunningTest in { + "NOT be allowed to join a cluster using a different value for pekko.cluster.downing-provider-class" taggedAs LongRunningTest in { val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # using explicit downing provider class downing-provider-class = "org.apache.pekko.cluster.testkit.AutoDowning" @@ -324,14 
+324,14 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // this config is NOT compatible with the cluster config val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is incompatible config-compat-test = "test2" configuration-compatibility-check { enforce-on-join = on checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" } } } @@ -370,16 +370,16 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is not available on cluster side - akka.cluster.config-compat-test-extra = on + pekko.cluster.config-compat-test-extra = on configuration-compatibility-check { enforce-on-join = on checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" - akka-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" } } } @@ -418,7 +418,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this config is required on cluster side # config-compat-test = "test" @@ -462,16 +462,16 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // This test verifies that validation on joining side takes 'configuration-compatibility-check.enforce-on-join' in consideration val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # this 
config is not available on cluster side - akka.cluster.config-compat-test-extra = on + pekko.cluster.config-compat-test-extra = on configuration-compatibility-check { enforce-on-join = off checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" - akka-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-extra = "org.apache.pekko.cluster.JoinConfigCompatCheckerExtraTest" } } } @@ -508,13 +508,13 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // but node will ignore the the config check and join anyway val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { configuration-compatibility-check { # not enforcing config compat check enforce-on-join = off checkers { - akka-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" + pekko-cluster-test = "org.apache.pekko.cluster.JoinConfigCompatCheckerTest" } } # this config is incompatible @@ -557,7 +557,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // the cluster won't let it be leaked back to the joining node neither which will fail the join attempt. val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { # these config are compatible, # but won't be leaked back to joining node which will cause it to fail to join @@ -607,12 +607,12 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // the cluster won't let it be leaked back to the joining node neither which will fail the join attempt. 
val joinNodeConfig = ConfigFactory.parseString(""" - akka.cluster { + pekko.cluster { configuration-compatibility-check { checkers { # disable what is defined in reference.conf - akka-cluster = "" - akka-cluster-test = "" + pekko-cluster = "" + pekko-cluster-test = "" } } } @@ -630,14 +630,14 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } class JoinConfigCompatCheckerTest extends JoinConfigCompatChecker { - override def requiredKeys = im.Seq("akka.cluster.config-compat-test") + override def requiredKeys = im.Seq("pekko.cluster.config-compat-test") override def check(toValidate: Config, actualConfig: Config): ConfigValidation = JoinConfigCompatChecker.fullMatch(requiredKeys, toValidate, actualConfig) } class JoinConfigCompatCheckerExtraTest extends JoinConfigCompatChecker { - override def requiredKeys = im.Seq("akka.cluster.config-compat-test-extra") + override def requiredKeys = im.Seq("pekko.cluster.config-compat-test-extra") override def check(toValidate: Config, actualConfig: Config): ConfigValidation = JoinConfigCompatChecker.fullMatch(requiredKeys, toValidate, actualConfig) @@ -647,7 +647,7 @@ class JoinConfigCompatCheckerExtraTest extends JoinConfigCompatChecker { class RogueJoinConfigCompatCheckerTest extends JoinConfigCompatChecker { override def requiredKeys = - im.Seq("akka.cluster.sensitive.properties.password", "akka.cluster.sensitive.properties.username") + im.Seq("pekko.cluster.sensitive.properties.password", "pekko.cluster.sensitive.properties.username") /** this check always returns Valid. 
The goal is to try to make the cluster leak those properties */ override def check(toValidate: Config, actualConfig: Config): ConfigValidation = diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatPreDefinedChecksSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatPreDefinedChecksSpec.scala index 9f7a5dfc09..e379ff159e 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatPreDefinedChecksSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/JoinConfigCompatPreDefinedChecksSpec.scala @@ -16,9 +16,9 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { "JoinConfigCompatChecker.exists" must { val requiredKeys = im.Seq( - "akka.cluster.min-nr-of-members", - "akka.cluster.retry-unsuccessful-join-after", - "akka.cluster.allow-weakly-up-members") + "pekko.cluster.min-nr-of-members", + "pekko.cluster.retry-unsuccessful-join-after", + "pekko.cluster.allow-weakly-up-members") "pass when all required keys are provided" in { @@ -27,9 +27,9 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { requiredKeys, config(""" |{ - | akka.cluster.min-nr-of-members = 1 - | akka.cluster.retry-unsuccessful-join-after = 10s - | akka.cluster.allow-weakly-up-members = on + | pekko.cluster.min-nr-of-members = 1 + | pekko.cluster.retry-unsuccessful-join-after = 10s + | pekko.cluster.allow-weakly-up-members = on |} """.stripMargin)) @@ -42,15 +42,15 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { requiredKeys, config(""" |{ - | akka.cluster.min-nr-of-members = 1 + | pekko.cluster.min-nr-of-members = 1 |} """.stripMargin)) match { case Valid => fail() case Invalid(incompatibleKeys) => incompatibleKeys should have size 2 - incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing") - incompatibleKeys should contain("akka.cluster.allow-weakly-up-members is missing") + 
incompatibleKeys should contain("pekko.cluster.retry-unsuccessful-join-after is missing") + incompatibleKeys should contain("pekko.cluster.allow-weakly-up-members is missing") } } } @@ -58,16 +58,16 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { "JoinConfigCompatChecker.fullMatch" must { val requiredKeys = im.Seq( - "akka.cluster.min-nr-of-members", - "akka.cluster.retry-unsuccessful-join-after", - "akka.cluster.allow-weakly-up-members") + "pekko.cluster.min-nr-of-members", + "pekko.cluster.retry-unsuccessful-join-after", + "pekko.cluster.allow-weakly-up-members") val clusterConfig = config(""" |{ - | akka.cluster.min-nr-of-members = 1 - | akka.cluster.retry-unsuccessful-join-after = 10s - | akka.cluster.allow-weakly-up-members = on + | pekko.cluster.min-nr-of-members = 1 + | pekko.cluster.retry-unsuccessful-join-after = 10s + | pekko.cluster.allow-weakly-up-members = on |} """.stripMargin) @@ -78,9 +78,9 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { requiredKeys, config(""" |{ - | akka.cluster.min-nr-of-members = 1 - | akka.cluster.retry-unsuccessful-join-after = 10s - | akka.cluster.allow-weakly-up-members = on + | pekko.cluster.min-nr-of-members = 1 + | pekko.cluster.retry-unsuccessful-join-after = 10s + | pekko.cluster.allow-weakly-up-members = on |} """.stripMargin), clusterConfig) @@ -94,7 +94,7 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { requiredKeys, config(""" |{ - | akka.cluster.min-nr-of-members = 1 + | pekko.cluster.min-nr-of-members = 1 |} """.stripMargin), clusterConfig) match { @@ -102,8 +102,8 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { fail() case Invalid(incompatibleKeys) => incompatibleKeys should have size 2 - incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing") - incompatibleKeys should contain("akka.cluster.allow-weakly-up-members is missing") + incompatibleKeys 
should contain("pekko.cluster.retry-unsuccessful-join-after is missing") + incompatibleKeys should contain("pekko.cluster.allow-weakly-up-members is missing") } } @@ -113,9 +113,9 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { requiredKeys, config(""" |{ - | akka.cluster.min-nr-of-members = 1 - | akka.cluster.retry-unsuccessful-join-after = 15s - | akka.cluster.allow-weakly-up-members = off + | pekko.cluster.min-nr-of-members = 1 + | pekko.cluster.retry-unsuccessful-join-after = 15s + | pekko.cluster.allow-weakly-up-members = off |} """.stripMargin), clusterConfig) match { @@ -123,8 +123,8 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { fail() case Invalid(incompatibleKeys) => incompatibleKeys should have size 2 - incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is incompatible") - incompatibleKeys should contain("akka.cluster.allow-weakly-up-members is incompatible") + incompatibleKeys should contain("pekko.cluster.retry-unsuccessful-join-after is incompatible") + incompatibleKeys should contain("pekko.cluster.allow-weakly-up-members is incompatible") } } @@ -134,8 +134,8 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { requiredKeys, config(""" |{ - | akka.cluster.min-nr-of-members = 1 - | akka.cluster.allow-weakly-up-members = off + | pekko.cluster.min-nr-of-members = 1 + | pekko.cluster.allow-weakly-up-members = off |} """.stripMargin), clusterConfig) match { @@ -143,8 +143,8 @@ class JoinConfigCompatPreDefinedChecksSpec extends AnyWordSpec with Matchers { fail() case Invalid(incompatibleKeys) => incompatibleKeys should have size 2 - incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing") - incompatibleKeys should contain("akka.cluster.allow-weakly-up-members is incompatible") + incompatibleKeys should contain("pekko.cluster.retry-unsuccessful-join-after is missing") + incompatibleKeys should 
contain("pekko.cluster.allow-weakly-up-members is incompatible") } } } diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ResetSystemMessageSeqNrSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ResetSystemMessageSeqNrSpec.scala index 2b0cfc5a3a..9927baac56 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ResetSystemMessageSeqNrSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ResetSystemMessageSeqNrSpec.scala @@ -18,9 +18,9 @@ import pekko.testkit.TestActors * Reproducer for issue #24847 */ class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec(""" - akka.loglevel = INFO - akka.actor.provider=cluster - akka.cluster.jmx.multi-mbeans-in-same-jvm = on + pekko.loglevel = INFO + pekko.actor.provider=cluster + pekko.cluster.jmx.multi-mbeans-in-same-jvm = on """) with ImplicitSender { "System messages sequence numbers" should { @@ -64,7 +64,7 @@ class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec(""" val sys3 = newRemoteSystem( name = Some(system.name), - extraConfig = Some(s"akka.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}")) + extraConfig = Some(s"pekko.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}")) Cluster(sys3).join(Cluster(system).selfAddress) within(10.seconds) { awaitAssert { diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ShutdownAfterJoinSeedNodesSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ShutdownAfterJoinSeedNodesSpec.scala index 4f04c68e5d..962614ce5e 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/ShutdownAfterJoinSeedNodesSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/ShutdownAfterJoinSeedNodesSpec.scala @@ -16,11 +16,11 @@ import pekko.testkit._ object ShutdownAfterJoinSeedNodesSpec { val config = """ - akka.actor.provider = "cluster" - akka.coordinated-shutdown.terminate-actor-system = on - akka.remote.classic.netty.tcp.port = 0 - 
akka.remote.artery.canonical.port = 0 - akka.cluster { + pekko.actor.provider = "cluster" + pekko.coordinated-shutdown.terminate-actor-system = on + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster { seed-node-timeout = 2s retry-unsuccessful-join-after = 2s shutdown-after-unsuccessful-join-seed-nodes = 5s diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/StartupWithOneThreadSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/StartupWithOneThreadSpec.scala index d87f6a49be..59a1b027a9 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/StartupWithOneThreadSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/StartupWithOneThreadSpec.scala @@ -16,18 +16,18 @@ import pekko.testkit.ImplicitSender object StartupWithOneThreadSpec { val config = """ - akka.actor.provider = "cluster" - akka.actor.creation-timeout = 10s - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.actor.creation-timeout = 10s + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { executor = thread-pool-executor thread-pool-executor { fixed-pool-size = 1 } } - akka.actor.internal-dispatcher = akka.actor.default-dispatcher + pekko.actor.internal-dispatcher = pekko.actor.default-dispatcher """ final case class GossipTo(address: Address) diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/protobuf/ClusterMessageSerializerSpec.scala index fa521a50fa..5e75de2413 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -19,7 +19,7 @@ import pekko.testkit.AkkaSpec 
import pekko.util.Version @nowarn -class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = cluster") { +class ClusterMessageSerializerSpec extends AkkaSpec("pekko.actor.provider = cluster") { val serializer = new ClusterMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/routing/ClusterRouterSupervisorSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/routing/ClusterRouterSupervisorSpec.scala index 29bf9a8d59..5649f511f7 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/routing/ClusterRouterSupervisorSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/routing/ClusterRouterSupervisorSpec.scala @@ -24,9 +24,9 @@ object ClusterRouterSupervisorSpec { } class ClusterRouterSupervisorSpec extends AkkaSpec(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 """) { import ClusterRouterSupervisorSpec._ diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/LeaseMajoritySpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/LeaseMajoritySpec.scala index 2ce2011dcc..4b39e3e103 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/LeaseMajoritySpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/LeaseMajoritySpec.scala @@ -13,15 +13,15 @@ class LeaseMajoritySpec extends AkkaSpec() with Eventually { val default = ConfigFactory .parseString( """ - akka.cluster.split-brain-resolver.lease-majority.lease-implementation = "akka.coordination.lease.kubernetes" + pekko.cluster.split-brain-resolver.lease-majority.lease-implementation = "pekko.coordination.lease.kubernetes" """) .withFallback(ConfigFactory.load()) val blank = ConfigFactory.parseString(""" - 
akka.cluster.split-brain-resolver.lease-majority { + pekko.cluster.split-brain-resolver.lease-majority { lease-name = " " }""").withFallback(default) val named = ConfigFactory.parseString(""" - akka.cluster.split-brain-resolver.lease-majority { + pekko.cluster.split-brain-resolver.lease-majority { lease-name = "shopping-cart-akka-sbr" }""").withFallback(default) diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSpec.scala index 51d1ab1cbb..218e8b3b2a 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/sbr/SplitBrainResolverSpec.scala @@ -95,7 +95,7 @@ object SplitBrainResolverSpec { class SplitBrainResolverSpec extends AkkaSpec(""" - |akka { + |pekko { | actor.provider = cluster | cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" | cluster.split-brain-resolver.active-strategy=keep-majority diff --git a/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDown.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDown.scala index 87c731cd14..8031cc28e9 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDown.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDown.scala @@ -60,7 +60,7 @@ final class AutoDowning(system: ActorSystem) extends DowningProvider { private def clusterSettings = Cluster(system).settings private val AutoDownUnreachableAfter: Duration = { - val key = "akka.cluster.testkit.auto-down-unreachable-after" + val key = "pekko.cluster.testkit.auto-down-unreachable-after" // it's not in reference.conf, since only used in tests if (clusterSettings.config.hasPath(key)) { toRootLowerCase(clusterSettings.config.getString(key)) match { diff --git 
a/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDownSpec.scala b/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDownSpec.scala index 2e260828d6..b965152bd3 100644 --- a/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDownSpec.scala +++ b/akka-cluster/src/test/scala/org/apache/pekko/cluster/testkit/AutoDownSpec.scala @@ -40,8 +40,8 @@ object AutoDownSpec { } class AutoDownSpec extends AkkaSpec(""" - |akka.actor.provider=remote - |akka.remote.warn-about-direct-use=off + |pekko.actor.provider=remote + |pekko.remote.warn-about-direct-use=off |""".stripMargin) { import AutoDownSpec._ diff --git a/akka-coordination/src/main/resources/reference.conf b/akka-coordination/src/main/resources/reference.conf index 310ab52099..502c2554d1 100644 --- a/akka-coordination/src/main/resources/reference.conf +++ b/akka-coordination/src/main/resources/reference.conf @@ -1,4 +1,4 @@ -akka.coordination { +pekko.coordination { # Defaults for any lease implementation that doesn't include these properties lease { diff --git a/akka-coordination/src/main/scala/org/apache/pekko/coordination/lease/scaladsl/LeaseProvider.scala b/akka-coordination/src/main/scala/org/apache/pekko/coordination/lease/scaladsl/LeaseProvider.scala index 5f66b9369e..1570970134 100644 --- a/akka-coordination/src/main/scala/org/apache/pekko/coordination/lease/scaladsl/LeaseProvider.scala +++ b/akka-coordination/src/main/scala/org/apache/pekko/coordination/lease/scaladsl/LeaseProvider.scala @@ -61,7 +61,7 @@ final class LeaseProvider(system: ExtendedActorSystem) extends Extension { override def apply(t: LeaseKey): Lease = { val leaseConfig = system.settings.config .getConfig(configPath) - .withFallback(system.settings.config.getConfig("akka.coordination.lease")) + .withFallback(system.settings.config.getConfig("pekko.coordination.lease")) val settings = LeaseSettings(leaseConfig, leaseName, ownerName) diff --git a/akka-discovery/src/main/resources/reference.conf 
b/akka-discovery/src/main/resources/reference.conf index c12a811a62..00a78f456b 100644 --- a/akka-discovery/src/main/resources/reference.conf +++ b/akka-discovery/src/main/resources/reference.conf @@ -1,8 +1,8 @@ ###################################################### -# Akka Discovery Config # +# Pekko Discovery Config # ###################################################### -akka.actor.deployment { +pekko.actor.deployment { "/SD-DNS/async-dns" { mailbox = "unbounded" router = "round-robin-pool" @@ -10,13 +10,13 @@ akka.actor.deployment { } } -akka.discovery { +pekko.discovery { # Users MUST configure this value to set the default discovery method. # - # The value can be an implementation config path name, such as "akka-dns", - # which would attempt to resolve as `akka.discovery.akka-dns` which is expected - # to contain a `class` setting. As fallback, the root `akka-dns` setting scope + # The value can be an implementation config path name, such as "pekko-dns", + # which would attempt to resolve as `pekko.discovery.pekko-dns` which is expected + # to contain a `class` setting. As fallback, the root `pekko-dns` setting scope # would be used. If none of those contained a `class` setting, then the value is # assumed to be a class name, and an attempt is made to instantiate it. method = "" @@ -26,7 +26,7 @@ akka.discovery { class = org.apache.pekko.discovery.config.ConfigServiceDiscovery # Location of the services in configuration - services-path = "akka.discovery.config.services" + services-path = "pekko.discovery.config.services" # A map of services to resolve from configuration. # See docs for more examples. @@ -62,13 +62,13 @@ akka.discovery { class = org.apache.pekko.discovery.aggregate.AggregateServiceDiscovery # List of service discovery methods to try in order. 
E.g config then fall back to DNS - # ["config", "akka-dns"] + # ["config", "pekko-dns"] discovery-methods = [] } # DNS based service discovery - akka-dns { + pekko-dns { class = org.apache.pekko.discovery.dns.DnsServiceDiscovery } } diff --git a/akka-discovery/src/main/scala/org/apache/pekko/discovery/Discovery.scala b/akka-discovery/src/main/scala/org/apache/pekko/discovery/Discovery.scala index 8839ee451d..b09930b3a3 100644 --- a/akka-discovery/src/main/scala/org/apache/pekko/discovery/Discovery.scala +++ b/akka-discovery/src/main/scala/org/apache/pekko/discovery/Discovery.scala @@ -23,19 +23,19 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { } private lazy val _defaultImplMethod = - system.settings.config.getString("akka.discovery.method") match { + system.settings.config.getString("pekko.discovery.method") match { case "" => throw new IllegalArgumentException( "No default service discovery implementation configured in " + - "`akka.discovery.method`. Make sure to configure this setting to your preferred implementation such as " + - "'akka-dns' in your application.conf (from the akka-discovery module).") + "`pekko.discovery.method`. Make sure to configure this setting to your preferred implementation such as " + + "'pekko-dns' in your application.conf (from the akka-discovery module).") case method => method } private lazy val defaultImpl = loadServiceDiscovery(_defaultImplMethod) /** - * Default [[ServiceDiscovery]] as configured in `akka.discovery.method`. + * Default [[ServiceDiscovery]] as configured in `pekko.discovery.method`. */ @throws[IllegalArgumentException] def discovery: ServiceDiscovery = defaultImpl @@ -43,7 +43,7 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { /** * Create a [[ServiceDiscovery]] from configuration property. * The given `method` parameter is used to find configuration property - * "akka.discovery.[method].class". + * "pekko.discovery.[method].class". 
* * The `ServiceDiscovery` instance for a given `method` will be created * once and subsequent requests for the same `method` will return the same instance. @@ -63,15 +63,9 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { def classNameFromConfig(path: String): String = { if (config.hasPath(path)) config.getString(path) - else { - // TODO: Update the org.apache.pekko to akka workaround when HOCON config gets updated - val replacePath = path.replace("org.apache.pekko", "akka") - if (config.hasPath(replacePath)) - config.getString(replacePath) - else - throw new IllegalArgumentException( - s"$path must point to a FQN of a `org.apache.pekko.discovery.ServiceDiscovery` implementation") - } + else + throw new IllegalArgumentException( + s"$path must point to a FQN of a `org.apache.pekko.discovery.ServiceDiscovery` implementation") } def create(clazzName: String): Try[ServiceDiscovery] = { @@ -87,7 +81,7 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { } } - val configName = "org.apache.pekko.discovery." + method + ".class" + val configName = s"pekko.discovery.$method.class" val instanceTry = create(classNameFromConfig(configName)) instanceTry match { @@ -124,11 +118,11 @@ object Discovery extends ExtensionId[Discovery] with ExtensionIdProvider { try { system.dynamicAccess.getClassFor[Any]("org.apache.pekko.discovery.SimpleServiceDiscovery").get throw new RuntimeException( - "Old version of Akka Discovery from Akka Management found on the classpath. Remove `com.lightbend.akka.discovery:akka-discovery` from the classpath..") + "Old version of Akka Discovery from Akka Management found on the classpath. Remove `com.lightbend.pekko.discovery:akka-discovery` from the classpath..") } catch { case _: ClassCastException => throw new RuntimeException( - "Old version of Akka Discovery from Akka Management found on the classpath. 
Remove `com.lightbend.akka.discovery:akka-discovery` from the classpath..") + "Old version of Akka Discovery from Akka Management found on the classpath. Remove `com.lightbend.pekko.discovery:akka-discovery` from the classpath..") case _: ClassNotFoundException => // all good } diff --git a/akka-discovery/src/main/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscovery.scala b/akka-discovery/src/main/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscovery.scala index 6e29ddb686..66332b7efc 100644 --- a/akka-discovery/src/main/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscovery.scala @@ -52,7 +52,7 @@ private[pekko] final class AggregateServiceDiscovery(system: ExtendedActorSystem private val log = Logging(system, classOf[AggregateServiceDiscovery]) private val settings = - new AggregateServiceDiscoverySettings(system.settings.config.getConfig("akka.discovery.aggregate")) + new AggregateServiceDiscoverySettings(system.settings.config.getConfig("pekko.discovery.aggregate")) private val methods = { val serviceDiscovery = Discovery(system) diff --git a/akka-discovery/src/main/scala/org/apache/pekko/discovery/config/ConfigServiceDiscovery.scala b/akka-discovery/src/main/scala/org/apache/pekko/discovery/config/ConfigServiceDiscovery.scala index 0606505f16..36d3c55fbd 100644 --- a/akka-discovery/src/main/scala/org/apache/pekko/discovery/config/ConfigServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/org/apache/pekko/discovery/config/ConfigServiceDiscovery.scala @@ -54,7 +54,7 @@ private[pekko] class ConfigServiceDiscovery(system: ExtendedActorSystem) extends private val log = Logging(system, classOf[ConfigServiceDiscovery]) private val resolvedServices = ConfigServicesParser.parse( - system.settings.config.getConfig(system.settings.config.getString("akka.discovery.config.services-path"))) + 
system.settings.config.getConfig(system.settings.config.getString("pekko.discovery.config.services-path"))) log.debug("Config discovery serving: {}", resolvedServices) diff --git a/akka-discovery/src/main/scala/org/apache/pekko/discovery/dns/DnsServiceDiscovery.scala b/akka-discovery/src/main/scala/org/apache/pekko/discovery/dns/DnsServiceDiscovery.scala index 19e6fcf09f..7f659450a6 100644 --- a/akka-discovery/src/main/scala/org/apache/pekko/discovery/dns/DnsServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/org/apache/pekko/discovery/dns/DnsServiceDiscovery.scala @@ -77,7 +77,7 @@ private[pekko] class DnsServiceDiscovery(system: ExtendedActorSystem) extends Se // exposed for testing private[dns] def initializeDns(): ActorRef = { - if (system.settings.config.getString("akka.io.dns.resolver") == "async-dns") { + if (system.settings.config.getString("pekko.io.dns.resolver") == "async-dns") { log.debug("using system resolver as it is set to async-dns") IO(Dns)(system) } else { diff --git a/akka-discovery/src/test/java/jdoc/org/apache/pekko/discovery/CompileOnlyTest.java b/akka-discovery/src/test/java/jdoc/org/apache/pekko/discovery/CompileOnlyTest.java index a9713f275f..de502c7ffd 100644 --- a/akka-discovery/src/test/java/jdoc/org/apache/pekko/discovery/CompileOnlyTest.java +++ b/akka-discovery/src/test/java/jdoc/org/apache/pekko/discovery/CompileOnlyTest.java @@ -20,15 +20,15 @@ public class CompileOnlyTest { // #loading // #basic - serviceDiscovery.lookup(Lookup.create("akka.io"), Duration.ofSeconds(1)); + serviceDiscovery.lookup(Lookup.create("pekko.io"), Duration.ofSeconds(1)); // convenience for a Lookup with only a serviceName - serviceDiscovery.lookup("akka.io", Duration.ofSeconds(1)); + serviceDiscovery.lookup("pekko.io", Duration.ofSeconds(1)); // #basic // #full CompletionStage lookup = serviceDiscovery.lookup( - Lookup.create("akka.io").withPortName("remoting").withProtocol("tcp"), + 
Lookup.create("pekko.io").withPortName("remoting").withProtocol("tcp"), Duration.ofSeconds(1)); // #full diff --git a/akka-discovery/src/test/scala/doc/org/apache/pekko/discovery/CompileOnlySpec.scala b/akka-discovery/src/test/scala/doc/org/apache/pekko/discovery/CompileOnlySpec.scala index e85ff40dd4..f71cf09112 100644 --- a/akka-discovery/src/test/scala/doc/org/apache/pekko/discovery/CompileOnlySpec.scala +++ b/akka-discovery/src/test/scala/doc/org/apache/pekko/discovery/CompileOnlySpec.scala @@ -22,9 +22,9 @@ object CompileOnlySpec { // #basic import org.apache.pekko.discovery.Lookup - serviceDiscovery.lookup(Lookup("akka.io"), 1.second) + serviceDiscovery.lookup(Lookup("pekko.io"), 1.second) // Convenience for a Lookup with only a serviceName - serviceDiscovery.lookup("akka.io", 1.second) + serviceDiscovery.lookup("pekko.io", 1.second) // #basic // #full @@ -33,7 +33,7 @@ object CompileOnlySpec { import pekko.discovery.ServiceDiscovery.Resolved val lookup: Future[Resolved] = - serviceDiscovery.lookup(Lookup("akka.io").withPortName("remoting").withProtocol("tcp"), 1.second) + serviceDiscovery.lookup(Lookup("pekko.io").withPortName("remoting").withProtocol("tcp"), 1.second) // #full // compiler diff --git a/akka-discovery/src/test/scala/org/apache/pekko/discovery/DiscoveryConfigurationSpec.scala b/akka-discovery/src/test/scala/org/apache/pekko/discovery/DiscoveryConfigurationSpec.scala index cb9a82ee7d..4d40448875 100644 --- a/akka-discovery/src/test/scala/org/apache/pekko/discovery/DiscoveryConfigurationSpec.scala +++ b/akka-discovery/src/test/scala/org/apache/pekko/discovery/DiscoveryConfigurationSpec.scala @@ -29,16 +29,16 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { } finally TestKit.shutdownActorSystem(sys) } - "select implementation from config by config name (inside org.apache.pekko.discovery namespace)" in { + "select implementation from config by config name (inside pekko.discovery namespace)" in { val className = 
classOf[FakeTestDiscovery].getCanonicalName val sys = ActorSystem( "DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" - akka.discovery { - method = akka-mock-inside + pekko.discovery { + method = pekko-mock-inside - akka-mock-inside { + pekko-mock-inside { class = $className } } @@ -55,7 +55,7 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" - akka.discovery { + pekko.discovery { method = mock1 mock1 { @@ -80,7 +80,7 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" - akka.discovery { + pekko.discovery { method = mock1 mock1 { @@ -106,7 +106,7 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" - akka.discovery { + pekko.discovery { method = "mock1" mock1 { class = $className @@ -125,7 +125,7 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" - akka.discovery { + pekko.discovery { method = "$className" } """).withFallback(ConfigFactory.load())) diff --git a/akka-discovery/src/test/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscoverySpec.scala index b6b18d75f9..7a1d87c7b4 100644 --- a/akka-discovery/src/test/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/org/apache/pekko/discovery/aggregate/AggregateServiceDiscoverySpec.scala @@ -39,7 +39,7 @@ class StubbedServiceDiscovery(@unused system: ExtendedActorSystem) extends Servi object AggregateServiceDiscoverySpec { val config: Config = ConfigFactory.parseString(""" - akka { + pekko { loglevel = DEBUG discovery 
{ method = aggregate @@ -50,11 +50,11 @@ object AggregateServiceDiscoverySpec { } } - akka.discovery.stubbed1 { + pekko.discovery.stubbed1 { class = org.apache.pekko.discovery.aggregate.StubbedServiceDiscovery } - akka.discovery.config.services = { + pekko.discovery.config.services = { config1 = { endpoints = [ { diff --git a/akka-discovery/src/test/scala/org/apache/pekko/discovery/config/ConfigServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/org/apache/pekko/discovery/config/ConfigServiceDiscoverySpec.scala index f0d85e025b..4da34655b4 100644 --- a/akka-discovery/src/test/scala/org/apache/pekko/discovery/config/ConfigServiceDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/org/apache/pekko/discovery/config/ConfigServiceDiscoverySpec.scala @@ -21,7 +21,7 @@ import org.apache.pekko.testkit.TestKit object ConfigServiceDiscoverySpec { val config: Config = ConfigFactory.parseString(""" -akka { +pekko { loglevel = DEBUG discovery { method = config diff --git a/akka-discovery/src/test/scala/org/apache/pekko/discovery/dns/DnsDiscoverySpec.scala b/akka-discovery/src/test/scala/org/apache/pekko/discovery/dns/DnsDiscoverySpec.scala index c224d300a6..feace3d442 100644 --- a/akka-discovery/src/test/scala/org/apache/pekko/discovery/dns/DnsDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/org/apache/pekko/discovery/dns/DnsDiscoverySpec.scala @@ -19,21 +19,21 @@ import pekko.testkit.{ SocketUtil, TestKit } object DnsDiscoverySpec { val config = ConfigFactory.parseString(s""" - akka { + pekko { discovery { - method = akka-dns + method = pekko-dns } } - akka { + pekko { loglevel = DEBUG } - akka.io.dns.async-dns.nameservers = ["localhost:${DnsDiscoverySpec.dockerDnsServerPort}"] + pekko.io.dns.async-dns.nameservers = ["localhost:${DnsDiscoverySpec.dockerDnsServerPort}"] """) lazy val dockerDnsServerPort = SocketUtil.temporaryLocalPort() val configWithAsyncDnsResolverAsDefault = ConfigFactory.parseString(""" - akka.io.dns.resolver = "async-dns" + 
pekko.io.dns.resolver = "async-dns" """).withFallback(config) } diff --git a/akka-distributed-data/src/main/java/org/apache/pekko/cluster/ddata/protobuf/msg/ReplicatorMessages.java b/akka-distributed-data/src/main/java/org/apache/pekko/cluster/ddata/protobuf/msg/ReplicatorMessages.java index c117c1662d..c9042f2925 100644 --- a/akka-distributed-data/src/main/java/org/apache/pekko/cluster/ddata/protobuf/msg/ReplicatorMessages.java +++ b/akka-distributed-data/src/main/java/org/apache/pekko/cluster/ddata/protobuf/msg/ReplicatorMessages.java @@ -23899,7 +23899,7 @@ public final class ReplicatorMessages { static { java.lang.String[] descriptorData = { "\n\030ReplicatorMessages.proto\022\022org.apache.pekko.cluster" + - ".ddata\"\307\001\n\003Get\022-\n\003key\030\001 \002(\0132 .akka.clust" + + ".ddata\"\307\001\n\003Get\022-\n\003key\030\001 \002(\0132 .pekko.clust" + "er.ddata.OtherMessage\022\023\n\013consistency\030\002 \002" + "(\021\022\017\n\007timeout\030\003 \002(\r\0221\n\007request\030\004 \001(\0132 .a" + "kka.cluster.ddata.OtherMessage\022\031\n\021consis" + @@ -23911,7 +23911,7 @@ public final class ReplicatorMessages { "erMessage\"l\n\010NotFound\022-\n\003key\030\001 \002(\0132 .akk" + "a.cluster.ddata.OtherMessage\0221\n\007request\030" + "\002 \001(\0132 .org.apache.pekko.cluster.ddata.OtherMessage\"" + - "n\n\nGetFailure\022-\n\003key\030\001 \002(\0132 .akka.cluste" + + "n\n\nGetFailure\022-\n\003key\030\001 \002(\0132 .pekko.cluste" + "r.ddata.OtherMessage\0221\n\007request\030\002 \001(\0132 ." 
+ "org.apache.pekko.cluster.ddata.OtherMessage\"G\n\tSubsc" + "ribe\022-\n\003key\030\001 \002(\0132 .org.apache.pekko.cluster.ddata.O" + @@ -23932,20 +23932,20 @@ public final class ReplicatorMessages { "\003(\0132-.org.apache.pekko.cluster.ddata.DataEnvelope.Pr" + "uningEntry\0228\n\rdeltaVersions\030\003 \001(\0132!.akka" + ".cluster.ddata.VersionVector\032\326\001\n\014Pruning" + - "Entry\0229\n\016removedAddress\030\001 \002(\0132!.akka.clu" + + "Entry\0229\n\016removedAddress\030\001 \002(\0132!.pekko.clu" + "ster.ddata.UniqueAddress\0227\n\014ownerAddress" + "\030\002 \002(\0132!.org.apache.pekko.cluster.ddata.UniqueAddres" + "s\022\021\n\tperformed\030\003 \002(\010\022)\n\004seen\030\004 \003(\0132\033.akk" + "a.cluster.ddata.Address\022\024\n\014obsoleteTime\030" + "\005 \001(\022\"\257\001\n\006Status\022\r\n\005chunk\030\001 \002(\r\022\021\n\ttotCh" + - "unks\030\002 \002(\r\0221\n\007entries\030\003 \003(\0132 .akka.clust" + + "unks\030\002 \002(\r\0221\n\007entries\030\003 \003(\0132 .pekko.clust" + "er.ddata.Status.Entry\022\023\n\013toSystemUid\030\004 \001" + "(\020\022\025\n\rfromSystemUid\030\005 \001(\020\032$\n\005Entry\022\013\n\003ke" + "y\030\001 \002(\t\022\016\n\006digest\030\002 \002(\014\"\303\001\n\006Gossip\022\020\n\010se" + - "ndBack\030\001 \002(\010\0221\n\007entries\030\002 \003(\0132 .akka.clu" + + "ndBack\030\001 \002(\010\0221\n\007entries\030\002 \003(\0132 .pekko.clu" + "ster.ddata.Gossip.Entry\022\023\n\013toSystemUid\030\003" + " \001(\020\022\025\n\rfromSystemUid\030\004 \001(\020\032H\n\005Entry\022\013\n\003" + - "key\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .akka.clust" + + "key\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .pekko.clust" + "er.ddata.DataEnvelope\"\201\002\n\020DeltaPropagati" + "on\0223\n\010fromNode\030\001 \002(\0132!.org.apache.pekko.cluster.ddat" + "a.UniqueAddress\022;\n\007entries\030\002 \003(\0132*.akka." 
+ diff --git a/akka-distributed-data/src/main/resources/reference.conf b/akka-distributed-data/src/main/resources/reference.conf index e9983f034c..fc8a98b825 100644 --- a/akka-distributed-data/src/main/resources/reference.conf +++ b/akka-distributed-data/src/main/resources/reference.conf @@ -1,5 +1,5 @@ ############################################## -# Akka Distributed DataReference Config File # +# Pekko Distributed DataReference Config File # ############################################## # This is the reference config file that contains all the default settings. @@ -8,7 +8,7 @@ #//#distributed-data # Settings for the DistributedData extension -akka.cluster.distributed-data { +pekko.cluster.distributed-data { # Actor name of the Replicator actor, /system/ddataReplicator name = ddataReplicator @@ -36,7 +36,7 @@ akka.cluster.distributed-data { # The id of the dispatcher to use for Replicator actors. # If specified you need to define the settings of the actual dispatcher. - use-dispatcher = "akka.actor.internal-dispatcher" + use-dispatcher = "pekko.actor.internal-dispatcher" # How often the Replicator checks for pruning of data associated with # removed cluster nodes. If this is set to 'off' the pruning feature will @@ -57,7 +57,7 @@ akka.cluster.distributed-data { # injected and merged with existing data after this time the value will not be correct. # This would be possible (although unlikely) in the case of a long network partition. # It should be in the magnitude of hours. For durable data it is configured by - # 'akka.cluster.distributed-data.durable.pruning-marker-time-to-live'. + # 'pekko.cluster.distributed-data.durable.pruning-marker-time-to-live'. 
pruning-marker-time-to-live = 6 h # Serialized Write and Read messages are cached when they are sent to @@ -93,16 +93,16 @@ akka.cluster.distributed-data { # be stopped for longer time than this duration and if it is joining again after this # duration its data should first be manually removed (from the lmdb directory). # It should be in the magnitude of days. Note that there is a corresponding setting - # for non-durable data: 'akka.cluster.distributed-data.pruning-marker-time-to-live'. + # for non-durable data: 'pekko.cluster.distributed-data.pruning-marker-time-to-live'. pruning-marker-time-to-live = 10 d # Fully qualified class name of the durable store actor. It must be a subclass - # of akka.actor.Actor and handle the protocol defined in + # of pekko.actor.Actor and handle the protocol defined in # org.apache.pekko.cluster.ddata.DurableStore. The class must have a constructor with # com.typesafe.config.Config parameter. store-actor-class = org.apache.pekko.cluster.ddata.LmdbDurableStore - use-dispatcher = akka.cluster.distributed-data.durable.pinned-store + use-dispatcher = pekko.cluster.distributed-data.durable.pinned-store pinned-store { executor = thread-pool-executor @@ -143,14 +143,14 @@ akka.cluster.distributed-data { #//#distributed-data # Protobuf serializer for cluster DistributedData messages -akka.actor { +pekko.actor { serializers { - akka-data-replication = "org.apache.pekko.cluster.ddata.protobuf.ReplicatorMessageSerializer" - akka-replicated-data = "org.apache.pekko.cluster.ddata.protobuf.ReplicatedDataSerializer" + pekko-data-replication = "org.apache.pekko.cluster.ddata.protobuf.ReplicatorMessageSerializer" + pekko-replicated-data = "org.apache.pekko.cluster.ddata.protobuf.ReplicatedDataSerializer" } serialization-bindings { - "org.apache.pekko.cluster.ddata.Replicator$ReplicatorMessage" = akka-data-replication - "org.apache.pekko.cluster.ddata.ReplicatedDataSerialization" = akka-replicated-data + 
"org.apache.pekko.cluster.ddata.Replicator$ReplicatorMessage" = pekko-data-replication + "org.apache.pekko.cluster.ddata.ReplicatedDataSerialization" = pekko-replicated-data } serialization-identifiers { "org.apache.pekko.cluster.ddata.protobuf.ReplicatedDataSerializer" = 11 diff --git a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/DistributedData.scala b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/DistributedData.scala index c94bebb2e0..cb49e191a9 100644 --- a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/DistributedData.scala +++ b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/DistributedData.scala @@ -28,7 +28,7 @@ object DistributedData extends ExtensionId[DistributedData] with ExtensionIdProv /** * Akka extension for convenient configuration and use of the * [[Replicator]]. Configuration settings are defined in the - * `akka.cluster.ddata` section, see `reference.conf`. + * `pekko.cluster.ddata` section, see `reference.conf`. */ class DistributedData(system: ExtendedActorSystem) extends Extension { diff --git a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/ReplicatedData.scala b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/ReplicatedData.scala index 320162b5db..87d2bb75db 100644 --- a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/ReplicatedData.scala +++ b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/ReplicatedData.scala @@ -125,7 +125,7 @@ trait RequiresCausalDeliveryOfDeltas extends ReplicatedDelta * threshold such deltas are discarded and sent as full state instead. This * interface should be implemented by such deltas to define its size. * This is number of elements or similar size hint, not size in bytes. 
- * The threshold is defined in `akka.cluster.distributed-data.delta-crdt.max-delta-size` + * The threshold is defined in `pekko.cluster.distributed-data.delta-crdt.max-delta-size` * or corresponding [[ReplicatorSettings]]. */ trait ReplicatedDeltaSize { diff --git a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/Replicator.scala index 14bc247b30..5d742661bb 100644 --- a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/Replicator.scala @@ -67,14 +67,14 @@ object ReplicatorSettings { /** * Create settings from the default configuration - * `akka.cluster.distributed-data`. + * `pekko.cluster.distributed-data`. */ def apply(system: ActorSystem): ReplicatorSettings = - apply(system.settings.config.getConfig("akka.cluster.distributed-data")) + apply(system.settings.config.getConfig("pekko.cluster.distributed-data")) /** * Create settings from a configuration with the same layout as - * the default configuration `akka.cluster.distributed-data`. + * the default configuration `pekko.cluster.distributed-data`. */ def apply(config: Config): ReplicatorSettings = { val dispatcher = config.getString("use-dispatcher") @@ -119,7 +119,7 @@ object ReplicatorSettings { * The name of the actor used in DistributedData extensions. 
*/ @InternalApi private[pekko] def name(system: ActorSystem, modifier: Option[String]): String = { - val name = system.settings.config.getString("akka.cluster.distributed-data.name") + val name = system.settings.config.getString("pekko.cluster.distributed-data.name") modifier.map(s => s + name.take(1).toUpperCase + name.drop(1)).getOrElse(name) } } @@ -1376,7 +1376,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog val remoteSettings = remoteProvider.remoteSettings val maxFrameSize = if (remoteSettings.Artery.Enabled) remoteSettings.Artery.Advanced.MaximumFrameSize - else context.system.settings.config.getBytes("akka.remote.classic.netty.tcp.maximum-frame-size").toInt + else context.system.settings.config.getBytes("pekko.remote.classic.netty.tcp.maximum-frame-size").toInt new PayloadSizeAggregator(log, sizeExceeding, maxFrameSize) } diff --git a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala index 9720aaccca..85998e0feb 100644 --- a/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala +++ b/akka-distributed-data/src/main/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala @@ -156,7 +156,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) import ReplicatorMessageSerializer.SmallCache private val cacheTimeToLive = system.settings.config - .getDuration("akka.cluster.distributed-data.serializer-cache-time-to-live", TimeUnit.MILLISECONDS) + .getDuration("pekko.cluster.distributed-data.serializer-cache-time-to-live", TimeUnit.MILLISECONDS) .millis private val readCache = new SmallCache[Read, Array[Byte]](4, cacheTimeToLive, m => readToProto(m).toByteArray) private val writeCache = new SmallCache[Write, Array[Byte]](4, cacheTimeToLive, m => 
writeToProto(m).toByteArray) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurableDataSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurableDataSpec.scala index be9326c943..891f30b7c4 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurableDataSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurableDataSpec.scala @@ -26,17 +26,17 @@ final case class DurableDataSpecConfig(writeBehind: Boolean) extends MultiNodeCo val first = role("first") val second = role("second") - commonConfig(ConfigFactory.parseString(s"""akka.loglevel = DEBUG - akka.actor.provider = "org.apache.pekko.cluster.ClusterActorRefProvider" - akka.log-dead-letters-during-shutdown = off - akka.cluster.distributed-data.durable.keys = ["durable*"] - akka.cluster.distributed-data.durable.lmdb { + commonConfig(ConfigFactory.parseString(s"""pekko.loglevel = DEBUG + pekko.actor.provider = "org.apache.pekko.cluster.ClusterActorRefProvider" + pekko.log-dead-letters-during-shutdown = off + pekko.cluster.distributed-data.durable.keys = ["durable*"] + pekko.cluster.distributed-data.durable.lmdb { dir = target/DurableDataSpec-${System.currentTimeMillis}-ddata map-size = 10 MiB write-behind-interval = ${if (writeBehind) "200ms" else "off"} } # initialization of lmdb can be very slow in CI environment - akka.test.single-expect-default = 15s + pekko.test.single-expect-default = 15s """)) } @@ -287,8 +287,8 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) "AdditionalSys", // use the same port ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${address.port.get} - akka.remote.classic.netty.tcp.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} + pekko.remote.classic.netty.tcp.port = ${address.port.get} """).withFallback(system.settings.config)) try { Cluster(sys2).join(address) diff 
--git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurablePruningSpec.scala index 6e2561a554..fcf193102d 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurablePruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/DurablePruningSpec.scala @@ -26,11 +26,11 @@ object DurablePruningSpec extends MultiNodeConfig { val second = role("second") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - akka.cluster.distributed-data.durable.keys = ["*"] - akka.cluster.distributed-data.durable.lmdb { + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.log-dead-letters-during-shutdown = off + pekko.cluster.distributed-data.durable.keys = ["*"] + pekko.cluster.distributed-data.durable.lmdb { dir = target/DurablePruningSpec-${System.currentTimeMillis}-ddata map-size = 10 MiB } @@ -162,8 +162,8 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN val sys3 = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${address.port.get} - akka.remote.classic.netty.tcp.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} + pekko.remote.classic.netty.tcp.port = ${address.port.get} """).withFallback(system.settings.config)) val cluster3 = Cluster(sys3) val replicator3 = startReplicator(sys3) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/JepsenInspiredInsertSpec.scala index c7ac99a5b8..1bae99e939 100644 --- 
a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/JepsenInspiredInsertSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/JepsenInspiredInsertSpec.scala @@ -27,12 +27,12 @@ object JepsenInspiredInsertSpec extends MultiNodeConfig { val n5 = role("n5") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters = off - akka.log-dead-letters-during-shutdown = off - akka.remote.log-remote-lifecycle-events = ERROR - akka.testconductor.barrier-timeout = 60 s + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.log-dead-letters = off + pekko.log-dead-letters-during-shutdown = off + pekko.remote.log-remote-lifecycle-events = ERROR + pekko.testconductor.barrier-timeout = 60 s """)) testTransport(on = true) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/PerformanceSpec.scala index f63a7bf70a..69d6f4b505 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/PerformanceSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/PerformanceSpec.scala @@ -28,23 +28,23 @@ object PerformanceSpec extends MultiNodeConfig { val n5 = role("n5") commonConfig(ConfigFactory.parseString(s""" - akka.loglevel = ERROR - akka.stdout-loglevel = ERROR - akka.loggers = ["org.apache.pekko.event.Logging$$DefaultLogger"] - akka.actor.provider = "cluster" - akka.log-dead-letters = off - akka.log-dead-letters-during-shutdown = off - akka.remote.classic.log-remote-lifecycle-events = ERROR - akka.remote.classic.log-frame-size-exceeding=1000b - akka.remote.artery.log-frame-size-exceeding=1000b - akka.testconductor.barrier-timeout = 60 s - akka.cluster.distributed-data.gossip-interval = 1 s + pekko.loglevel = ERROR + pekko.stdout-loglevel = ERROR + pekko.loggers = 
["org.apache.pekko.event.Logging$$DefaultLogger"] + pekko.actor.provider = "cluster" + pekko.log-dead-letters = off + pekko.log-dead-letters-during-shutdown = off + pekko.remote.classic.log-remote-lifecycle-events = ERROR + pekko.remote.classic.log-frame-size-exceeding=1000b + pekko.remote.artery.log-frame-size-exceeding=1000b + pekko.testconductor.barrier-timeout = 60 s + pekko.cluster.distributed-data.gossip-interval = 1 s - #akka.cluster.distributed-data.durable.keys = ["*"] - #akka.cluster.distributed-data.durable.lmdb.dir = target/PerformanceSpec-${System.currentTimeMillis}-ddata - #akka.cluster.distributed-data.durable.lmdb.write-behind-interval = 200ms + #pekko.cluster.distributed-data.durable.keys = ["*"] + #pekko.cluster.distributed-data.durable.lmdb.dir = target/PerformanceSpec-${System.currentTimeMillis}-ddata + #pekko.cluster.distributed-data.durable.lmdb.write-behind-interval = 200ms - #akka.cluster.distributed-data.delta-crdt.enabled = off + #pekko.cluster.distributed-data.delta-crdt.enabled = off """)) def countDownProps(latch: TestLatch): Props = Props(new CountDown(latch)).withDeploy(Deploy.local) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorChaosSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorChaosSpec.scala index f62016506e..da7d0168ac 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorChaosSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorChaosSpec.scala @@ -24,10 +24,10 @@ object ReplicatorChaosSpec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.cluster.roles = ["backend"] - akka.log-dead-letters-during-shutdown = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.cluster.roles = ["backend"] + 
pekko.log-dead-letters-during-shutdown = off """)) testTransport(on = true) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorDeltaSpec.scala index 63d82871a1..dfe63f9ec1 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorDeltaSpec.scala @@ -26,9 +26,9 @@ object ReplicatorDeltaSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.log-dead-letters-during-shutdown = off """)) testTransport(on = true) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorGossipSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorGossipSpec.scala index 5c4b1a39e2..81a680d3b3 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorGossipSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorGossipSpec.scala @@ -22,16 +22,16 @@ object ReplicatorGossipSpec extends MultiNodeConfig { val second = role("second") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.cluster.distributed-data { + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.cluster.distributed-data { # only gossip in this test delta-crdt.enabled = off gossip-interval = 1s max-delta-elements = 400 log-data-size-exceeding = 2000 } - akka.remote.artery { + pekko.remote.artery { log-frame-size-exceeding = 2000 advanced.maximum-frame-size = 50000 } diff --git 
a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorMapDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorMapDeltaSpec.scala index f7d2660084..88d9e776f1 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorMapDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorMapDeltaSpec.scala @@ -26,9 +26,9 @@ object ReplicatorMapDeltaSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.log-dead-letters-during-shutdown = off """)) testTransport(on = true) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorORSetDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorORSetDeltaSpec.scala index 3ad3d2ec25..af01c47fa4 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorORSetDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorORSetDeltaSpec.scala @@ -22,9 +22,9 @@ object ReplicatorORSetDeltaSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.log-dead-letters-during-shutdown = off """)) testTransport(on = true) diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorPruningSpec.scala index 93651a781e..96000fd744 100644 --- 
a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorPruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorPruningSpec.scala @@ -23,12 +23,12 @@ object ReplicatorPruningSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO + pekko.loglevel = INFO # we use 3s as write timeouts in test, make sure we see that # and not time out the expectMsg at the same time - akka.test.single-expect-default = 5s - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off + pekko.test.single-expect-default = 5s + pekko.actor.provider = "cluster" + pekko.log-dead-letters-during-shutdown = off """)) } diff --git a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorSpec.scala index faf0b83729..cdc7f81af8 100644 --- a/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/org/apache/pekko/cluster/ddata/ReplicatorSpec.scala @@ -25,10 +25,10 @@ object ReplicatorSpec extends MultiNodeConfig { val third = role("third") commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - #akka.cluster.distributed-data.delta-crdt.enabled = off + pekko.loglevel = INFO + pekko.actor.provider = "cluster" + pekko.log-dead-letters-during-shutdown = off + #pekko.cluster.distributed-data.delta-crdt.enabled = off """)) testTransport(on = true) diff --git a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LocalConcurrencySpec.scala b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LocalConcurrencySpec.scala index 429fc2d9d7..df6152c265 100644 --- 
a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LocalConcurrencySpec.scala +++ b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LocalConcurrencySpec.scala @@ -52,9 +52,9 @@ class LocalConcurrencySpec(_system: ActorSystem) ActorSystem( "LocalConcurrencySpec", ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port=0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port=0 + pekko.remote.artery.canonical.port = 0 """))) override def afterAll(): Unit = { diff --git a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LotsOfDataBot.scala b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LotsOfDataBot.scala index f575da43a2..a4cdcfcd6f 100644 --- a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LotsOfDataBot.scala +++ b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/LotsOfDataBot.scala @@ -33,20 +33,20 @@ object LotsOfDataBot { ports.foreach { port => // Override the configuration of the port val config = ConfigFactory - .parseString("akka.remote.classic.netty.tcp.port=" + port) + .parseString("pekko.remote.classic.netty.tcp.port=" + port) .withFallback( ConfigFactory.load(ConfigFactory.parseString(""" passive = off max-entries = 100000 - akka.actor.provider = "cluster" - akka.remote { + pekko.actor.provider = "cluster" + pekko.remote { artery.canonical { hostname = "127.0.0.1" port = 0 } } - akka.cluster { + pekko.cluster { seed-nodes = [ "akka://ClusterSystem@127.0.0.1:2551", "akka://ClusterSystem@127.0.0.1:2552"] diff --git a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/ReplicatorSettingsSpec.scala b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/ReplicatorSettingsSpec.scala index 7bd17b79e0..4cec67498a 100644 --- 
a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/ReplicatorSettingsSpec.scala +++ b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/ReplicatorSettingsSpec.scala @@ -13,10 +13,10 @@ import org.apache.pekko.testkit.AkkaSpec object ReplicatorSettingsSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1""") + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1""") } class ReplicatorSettingsSpec diff --git a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/WriteAggregatorSpec.scala b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/WriteAggregatorSpec.scala index 1875e0d370..cb80abff76 100644 --- a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/WriteAggregatorSpec.scala +++ b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/WriteAggregatorSpec.scala @@ -130,10 +130,10 @@ object WriteAggregatorSpec { } class WriteAggregatorSpec extends AkkaSpec(s""" - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.cluster.distributed-data.durable.lmdb { + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.cluster.distributed-data.durable.lmdb { dir = target/WriteAggregatorSpec-${System.currentTimeMillis}-ddata map-size = 10 MiB } diff --git a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala index d3f98c7f95..31d9b3c28b 100644 --- 
a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala +++ b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala @@ -31,10 +31,10 @@ class ReplicatedDataSerializerSpec ActorSystem( "ReplicatedDataSerializerSpec", ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.actor.provider=cluster - akka.remote.classic.netty.tcp.port=0 - akka.remote.artery.canonical.port = 0 + pekko.loglevel = DEBUG + pekko.actor.provider=cluster + pekko.remote.classic.netty.tcp.port=0 + pekko.remote.artery.canonical.port = 0 """))) with AnyWordSpecLike with Matchers diff --git a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala index f27807a6d4..44a1683946 100644 --- a/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala +++ b/akka-distributed-data/src/test/scala/org/apache/pekko/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala @@ -37,9 +37,9 @@ class ReplicatorMessageSerializerSpec ActorSystem( "ReplicatorMessageSerializerSpec", ConfigFactory.parseString(""" - akka.actor.provider=cluster - akka.remote.classic.netty.tcp.port=0 - akka.remote.artery.canonical.port = 0 + pekko.actor.provider=cluster + pekko.remote.classic.netty.tcp.port=0 + pekko.remote.artery.canonical.port = 0 """))) with AnyWordSpecLike with Matchers diff --git a/akka-multi-node-testkit/src/main/resources/reference.conf b/akka-multi-node-testkit/src/main/resources/reference.conf index f5193dd30f..c2d1401093 100644 --- a/akka-multi-node-testkit/src/main/resources/reference.conf +++ b/akka-multi-node-testkit/src/main/resources/reference.conf @@ -1,11 +1,11 @@ ############################################# -# Akka Remote Testing Reference Config File # 
+# Pekko Remote Testing Reference Config File # ############################################# # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -akka { +pekko { testconductor { # Timeout for joining a barrier: this is the maximum time any participants diff --git a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Conductor.scala index dc3fbda80d..ebd09d2b2b 100644 --- a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Conductor.scala +++ b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Conductor.scala @@ -66,7 +66,7 @@ trait Conductor { this: TestConductorExt => /** * Start the [[pekko.remote.testconductor.Controller]], which in turn will - * bind to a TCP port as specified in the `akka.testconductor.port` config + * bind to a TCP port as specified in the `pekko.testconductor.port` config * property, where 0 denotes automatic allocation. Since the latter is * actually preferred, a `Future[Int]` is returned which will be completed * with the port number actually chosen, so that this can then be communicated @@ -96,7 +96,7 @@ trait Conductor { this: TestConductorExt => /** * Obtain the port to which the controller’s socket is actually bound. This - * will deviate from the configuration in `akka.testconductor.port` in case + * will deviate from the configuration in `pekko.testconductor.port` in case * that was given as zero. */ def sockAddr: Future[InetSocketAddress] = { @@ -110,7 +110,7 @@ trait Conductor { this: TestConductorExt => * within the netty pipeline until the packet would have been completely sent * according to the given rate, the previous packet completion and the current * packet length. 
In case of large packets they are split up if the calculated - * send pause would exceed `akka.testconductor.packet-split-threshold` + * send pause would exceed `pekko.testconductor.packet-split-threshold` * (roughly). All of this uses the system’s scheduler, which is not * terribly precise and will execute tasks later than they are schedule (even * on average), but that is countered by using the actual execution time for diff --git a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Extension.scala b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Extension.scala index 981fe9c3c2..e88e0e9f50 100644 --- a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Extension.scala +++ b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Extension.scala @@ -46,7 +46,7 @@ object TestConductor extends ExtensionId[TestConductorExt] with ExtensionIdProvi * more information. * * ====Note==== - * This extension requires the `akka.actor.provider` + * This extension requires the `pekko.actor.provider` * to be a [[pekko.remote.RemoteActorRefProvider]]. 
* * To use ``blackhole``, ``passThrough``, and ``throttle`` you must activate the @@ -56,7 +56,7 @@ object TestConductor extends ExtensionId[TestConductorExt] with ExtensionIdProvi class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { object Settings { - val config = system.settings.config.getConfig("akka.testconductor") + val config = system.settings.config.getConfig("pekko.testconductor") import org.apache.pekko.util.Helpers.ConfigOps val ConnectTimeout = config.getMillisDuration("connect-timeout") diff --git a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Player.scala index 5602550a24..ab10bd5735 100644 --- a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Player.scala +++ b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testconductor/Player.scala @@ -83,7 +83,7 @@ trait Player { this: TestConductorExt => /** * Connect to the conductor on the given port (the host is taken from setting - * `akka.testconductor.host`). The connection is made asynchronously, but you + * `pekko.testconductor.host`). The connection is made asynchronously, but you * should await completion of the returned Future because that implies that * all expected participants of this test have successfully connected (i.e. * this is a first barrier in itself). 
The number of expected participants is diff --git a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testkit/MultiNodeSpec.scala index d2e38034de..ee2962e8b1 100644 --- a/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testkit/MultiNodeSpec.scala +++ b/akka-multi-node-testkit/src/main/scala/org/apache/pekko/remote/testkit/MultiNodeSpec.scala @@ -59,20 +59,20 @@ abstract class MultiNodeConfig { def debugConfig(on: Boolean): Config = if (on) ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.remote { + pekko.loglevel = DEBUG + pekko.remote { log-received-messages = on log-sent-messages = on } - akka.remote.artery { + pekko.remote.artery { log-received-messages = on log-sent-messages = on } - akka.actor.debug { + pekko.actor.debug { receive = on fsm = on } - akka.remote.log-remote-lifecycle-events = on + pekko.remote.log-remote-lifecycle-events = on """) else ConfigFactory.empty @@ -109,8 +109,8 @@ abstract class MultiNodeConfig { private[pekko] def config: Config = { val transportConfig = if (_testTransport) ConfigFactory.parseString(""" - akka.remote.classic.netty.tcp.applied-adapters = [trttl, gremlin] - akka.remote.artery.advanced.test-mode = on + pekko.remote.classic.netty.tcp.applied-adapters = [trttl, gremlin] + pekko.remote.artery.advanced.test-mode = on """) else ConfigFactory.empty @@ -237,15 +237,15 @@ object MultiNodeSpec { private[testkit] val nodeConfig = mapToConfig( Map( - "akka.actor.provider" -> "remote", - "akka.remote.artery.canonical.hostname" -> selfName, - "akka.remote.classic.netty.tcp.hostname" -> selfName, - "akka.remote.classic.netty.tcp.port" -> tcpPort, - "akka.remote.artery.canonical.port" -> selfPort)) + "pekko.actor.provider" -> "remote", + "pekko.remote.artery.canonical.hostname" -> selfName, + "pekko.remote.classic.netty.tcp.hostname" -> selfName, + "pekko.remote.classic.netty.tcp.port" -> tcpPort, + 
"pekko.remote.artery.canonical.port" -> selfPort)) private[testkit] val baseConfig: Config = ConfigFactory.parseString(""" - akka { + pekko { loggers = ["org.apache.pekko.testkit.TestEventListener"] loglevel = "WARNING" stdout-loglevel = "WARNING" @@ -275,8 +275,8 @@ object MultiNodeSpec { // Please note that with the current setup only port 5000 and 5001 (or 6000 and 6001 when using UDP) // are exposed in kubernetes def configureNextPortIfFixed(config: Config): Config = { - val arteryPortConfig = getNextPortString("akka.remote.artery.canonical.port", config) - val nettyPortConfig = getNextPortString("akka.remote.classic.netty.tcp.port", config) + val arteryPortConfig = getNextPortString("pekko.remote.artery.canonical.port", config) + val nettyPortConfig = getNextPortString("pekko.remote.classic.netty.tcp.port", config) ConfigFactory.parseString(s"""{ $arteryPortConfig $nettyPortConfig @@ -439,7 +439,7 @@ abstract class MultiNodeSpec( * the innermost enclosing `within` block or the passed `max` timeout. * * Note that the `max` timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". 
*/ def enterBarrier(max: FiniteDuration, name: String*): Unit = testConductor.enter(Timeout.durationToTimeout(remainingOr(max.dilated)), name.to(immutable.Seq)) @@ -542,7 +542,7 @@ abstract class MultiNodeSpec( */ protected def startNewSystem(): ActorSystem = { val config = ConfigFactory - .parseString(s"akka.remote.classic.netty.tcp{port=${myAddress.port.get}\nhostname=${myAddress.host.get}}") + .parseString(s"pekko.remote.classic.netty.tcp{port=${myAddress.port.get}\nhostname=${myAddress.host.get}}") .withFallback(system.settings.config) val sys = ActorSystem(system.name, config) injectDeployments(sys, myself) diff --git a/akka-persistence-query/src/main/resources/reference.conf b/akka-persistence-query/src/main/resources/reference.conf index 4c29cc60b6..2a0a9809a0 100644 --- a/akka-persistence-query/src/main/resources/reference.conf +++ b/akka-persistence-query/src/main/resources/reference.conf @@ -1,5 +1,5 @@ ####################################################### -# Akka Persistence Query Reference Configuration File # +# Pekko Persistence Query Reference Configuration File # ####################################################### # This is the reference config file that contains all the default settings. @@ -7,14 +7,14 @@ #//#query-leveldb # Configuration for the LeveldbReadJournal -akka.persistence.query.journal.leveldb { +pekko.persistence.query.journal.leveldb { # Implementation class of the LevelDB ReadJournalProvider class = "org.apache.pekko.persistence.query.journal.leveldb.LeveldbReadJournalProvider" # Absolute path to the write journal plugin configuration entry that this # query journal will connect to. That must be a LeveldbJournal or SharedLeveldbJournal. # If undefined (or "") it will connect to the default journal as specified by the - # akka.persistence.journal.plugin property. + # pekko.persistence.journal.plugin property. 
write-plugin = "" # The LevelDB write journal is notifying the query side as soon as things @@ -28,13 +28,13 @@ akka.persistence.query.journal.leveldb { } #//#query-leveldb -akka.actor { +pekko.actor { serializers { - akka-persistence-query = "org.apache.pekko.persistence.query.internal.QuerySerializer" + pekko-persistence-query = "org.apache.pekko.persistence.query.internal.QuerySerializer" } serialization-bindings { - "org.apache.pekko.persistence.query.typed.EventEnvelope" = akka-persistence-query - "org.apache.pekko.persistence.query.Offset" = akka-persistence-query + "org.apache.pekko.persistence.query.typed.EventEnvelope" = pekko-persistence-query + "org.apache.pekko.persistence.query.Offset" = pekko-persistence-query } serialization-identifiers { "org.apache.pekko.persistence.query.internal.QuerySerializer" = 39 diff --git a/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala index 9019293a1e..958673ecbb 100644 --- a/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala +++ b/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala @@ -22,7 +22,7 @@ import pekko.stream.javadsl.Source * Corresponding Scala API is in [[pekko.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal]]. * * Configuration settings can be defined in the configuration section with the - * absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"` + * absolute path corresponding to the identifier, which is `"pekko.persistence.query.journal.leveldb"` * for the default [[LeveldbReadJournal#Identifier]]. See `reference.conf`. 
*/ @deprecated("Use another journal implementation", "2.6.15") @@ -165,7 +165,7 @@ object LeveldbReadJournal { * The default identifier for [[LeveldbReadJournal]] to be used with * [[pekko.persistence.query.PersistenceQuery#getReadJournalFor]]. * - * The value is `"akka.persistence.query.journal.leveldb"` and corresponds + * The value is `"pekko.persistence.query.journal.leveldb"` and corresponds * to the absolute path to the read journal configuration entry. */ final val Identifier = pekko.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal.Identifier diff --git a/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala index 0bab74ea1c..408d230ce0 100644 --- a/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala +++ b/akka-persistence-query/src/main/scala/org/apache/pekko/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala @@ -37,7 +37,7 @@ import pekko.util.ByteString * Corresponding Java API is in [[pekko.persistence.query.journal.leveldb.javadsl.LeveldbReadJournal]]. * * Configuration settings can be defined in the configuration section with the - * absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"` + * absolute path corresponding to the identifier, which is `"pekko.persistence.query.journal.leveldb"` * for the default [[LeveldbReadJournal#Identifier]]. See `reference.conf`. 
*/ @deprecated("Use another journal implementation", "2.6.15") @@ -56,7 +56,7 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) private val resolvedWriteJournalPluginId = if (writeJournalPluginId.isEmpty) - system.settings.config.getString("akka.persistence.journal.plugin") + system.settings.config.getString("pekko.persistence.journal.plugin") else writeJournalPluginId require( @@ -275,8 +275,8 @@ object LeveldbReadJournal { * The default identifier for [[LeveldbReadJournal]] to be used with * [[pekko.persistence.query.PersistenceQuery#readJournalFor]]. * - * The value is `"akka.persistence.query.journal.leveldb"` and corresponds + * The value is `"pekko.persistence.query.journal.leveldb"` and corresponds * to the absolute path to the read journal configuration entry. */ - final val Identifier = "akka.persistence.query.journal.leveldb" + final val Identifier = "pekko.persistence.query.journal.leveldb" } diff --git a/akka-persistence-query/src/test/java/org/apache/pekko/persistence/query/DummyJavaReadJournal.java b/akka-persistence-query/src/test/java/org/apache/pekko/persistence/query/DummyJavaReadJournal.java index 8076a4d9ff..6bdcba4050 100644 --- a/akka-persistence-query/src/test/java/org/apache/pekko/persistence/query/DummyJavaReadJournal.java +++ b/akka-persistence-query/src/test/java/org/apache/pekko/persistence/query/DummyJavaReadJournal.java @@ -13,7 +13,7 @@ import org.apache.pekko.stream.javadsl.Source; /** Use for tests only! Emits infinite stream of strings (representing queried for events). 
*/ public class DummyJavaReadJournal implements ReadJournal, PersistenceIdsQuery { - public static final String Identifier = "akka.persistence.query.journal.dummy-java"; + public static final String Identifier = "pekko.persistence.query.journal.dummy-java"; @Override public Source persistenceIds() { diff --git a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/DummyReadJournal.scala b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/DummyReadJournal.scala index 018174312f..9e4d2b848f 100644 --- a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/DummyReadJournal.scala +++ b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/DummyReadJournal.scala @@ -22,7 +22,7 @@ class DummyReadJournal(val dummyValue: String) extends scaladsl.ReadJournal with } object DummyReadJournal { - final val Identifier = "akka.persistence.query.journal.dummy" + final val Identifier = "pekko.persistence.query.journal.dummy" } class DummyReadJournalForJava(readJournal: DummyReadJournal) diff --git a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/PersistenceQuerySpec.scala b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/PersistenceQuerySpec.scala index 4c76294e42..8691373df9 100644 --- a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/PersistenceQuerySpec.scala +++ b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/PersistenceQuerySpec.scala @@ -22,7 +22,7 @@ class PersistenceQuerySpec extends AnyWordSpecLike with Matchers with BeforeAndA val eventAdaptersConfig = s""" - |akka.persistence.query.journal.dummy { + |pekko.persistence.query.journal.dummy { | event-adapters { | adapt = ${classOf[PrefixStringWithPAdapter].getCanonicalName} | } diff --git a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala 
b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala index 86eacabcbf..1c47d3dbce 100644 --- a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala +++ b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala @@ -17,13 +17,13 @@ import scala.annotation.nowarn object AllPersistenceIdsSpec { val config = """ - akka.loglevel = INFO - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" - akka.persistence.journal.leveldb.dir = "target/journal-AllPersistenceIdsSpec" - akka.test.single-expect-default = 10s + pekko.loglevel = INFO + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" + pekko.persistence.journal.leveldb.dir = "target/journal-AllPersistenceIdsSpec" + pekko.test.single-expect-default = 10s # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """ } diff --git a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/Cleanup.scala b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/Cleanup.scala index ef97dd4a43..782b671387 100644 --- a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/Cleanup.scala +++ b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/Cleanup.scala @@ -13,9 +13,9 @@ import org.apache.pekko.testkit.AkkaSpec trait Cleanup { this: AkkaSpec => val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + 
"pekko.persistence.journal.leveldb.dir", + "pekko.persistence.journal.leveldb-shared.store.dir", + "pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) override protected def atStartup(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) diff --git a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala index 57361f959a..5daddb4275 100644 --- a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala +++ b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala @@ -19,14 +19,14 @@ import scala.annotation.nowarn object EventsByPersistenceIdSpec { val config = """ - akka.loglevel = INFO - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" - akka.persistence.journal.leveldb.dir = "target/journal-EventsByPersistenceIdSpec" - akka.test.single-expect-default = 10s - akka.persistence.query.journal.leveldb.refresh-interval = 1s + pekko.loglevel = INFO + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" + pekko.persistence.journal.leveldb.dir = "target/journal-EventsByPersistenceIdSpec" + pekko.test.single-expect-default = 10s + pekko.persistence.query.journal.leveldb.refresh-interval = 1s # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """ } diff --git a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByTagSpec.scala 
b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByTagSpec.scala index 43737e661b..c29ffb063b 100644 --- a/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByTagSpec.scala +++ b/akka-persistence-query/src/test/scala/org/apache/pekko/persistence/query/journal/leveldb/EventsByTagSpec.scala @@ -22,10 +22,10 @@ import scala.annotation.nowarn object EventsByTagSpec { val config = s""" - akka.loglevel = INFO - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" + pekko.loglevel = INFO + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" - akka.persistence.journal.leveldb { + pekko.persistence.journal.leveldb { dir = "target/journal-EventsByTagSpec" event-adapters { color-tagger = org.apache.pekko.persistence.query.journal.leveldb.ColorTagger @@ -34,13 +34,13 @@ object EventsByTagSpec { "java.lang.String" = color-tagger } } - akka.persistence.query.journal.leveldb { + pekko.persistence.query.journal.leveldb { refresh-interval = 1s max-buffer-size = 2 } - akka.test.single-expect-default = 10s + pekko.test.single-expect-default = 10s - leveldb-no-refresh = $${akka.persistence.query.journal.leveldb} + leveldb-no-refresh = $${pekko.persistence.query.journal.leveldb} leveldb-no-refresh { refresh-interval = 10m } diff --git a/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/PersistencePluginProxySpec.scala b/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/PersistencePluginProxySpec.scala index 74c16ace12..b2da15e91c 100644 --- a/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/PersistencePluginProxySpec.scala +++ b/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/PersistencePluginProxySpec.scala @@ -15,23 +15,23 @@ import pekko.testkit.{ AkkaSpec, TestProbe } object PersistencePluginProxySpec { lazy val config 
= ConfigFactory.parseString(s""" - akka { + pekko { actor { provider = remote } persistence { journal { - plugin = "akka.persistence.journal.proxy" - proxy.target-journal-plugin = "akka.persistence.journal.inmem" + plugin = "pekko.persistence.journal.proxy" + proxy.target-journal-plugin = "pekko.persistence.journal.inmem" } snapshot-store { - plugin = "akka.persistence.snapshot-store.proxy" - proxy.target-snapshot-store-plugin = "akka.persistence.snapshot-store.local" + plugin = "pekko.persistence.snapshot-store.proxy" + proxy.target-snapshot-store-plugin = "pekko.persistence.snapshot-store.local" local.dir = target/snapshots-PersistencePluginProxySpec } } remote { - enabled-transports = ["akka.remote.classic.netty.tcp"] + enabled-transports = ["pekko.remote.classic.netty.tcp"] classic.netty.tcp { hostname = "127.0.0.1" port = 0 @@ -50,8 +50,8 @@ object PersistencePluginProxySpec { lazy val startTargetConfig = ConfigFactory.parseString(""" - |akka.extensions = ["org.apache.pekko.persistence.journal.PersistencePluginProxyExtension"] - |akka.persistence { + |pekko.extensions = ["org.apache.pekko.persistence.journal.PersistencePluginProxyExtension"] + |pekko.persistence { | journal.proxy.start-target-journal = on | snapshot-store.proxy.start-target-snapshot-store = on |} @@ -59,13 +59,13 @@ object PersistencePluginProxySpec { def targetAddressConfig(system: ActorSystem) = ConfigFactory.parseString(s""" - |akka.extensions = ["org.apache.pekko.persistence.Persistence"] - |akka.persistence.journal.auto-start-journals = [""] - |akka.persistence.journal.proxy.target-journal-address = "${system + |pekko.extensions = ["org.apache.pekko.persistence.Persistence"] + |pekko.persistence.journal.auto-start-journals = [""] + |pekko.persistence.journal.proxy.target-journal-address = "${system .asInstanceOf[ExtendedActorSystem] .provider .getDefaultAddress}" - |akka.persistence.snapshot-store.proxy.target-snapshot-store-address = "${system + 
|pekko.persistence.snapshot-store.proxy.target-snapshot-store-address = "${system .asInstanceOf[ExtendedActorSystem] .provider .getDefaultAddress}" diff --git a/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala b/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala index 78efdb17ca..94d489b089 100644 --- a/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala +++ b/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala @@ -13,17 +13,17 @@ import pekko.testkit.{ AkkaSpec, TestProbe } object SharedLeveldbJournalSpec { val config = ConfigFactory.parseString(s""" - akka { + pekko { actor { provider = remote } persistence { journal { - plugin = "akka.persistence.journal.leveldb-shared" + plugin = "pekko.persistence.journal.leveldb-shared" leveldb-shared.store.dir = target/journal-SharedLeveldbJournalSpec } snapshot-store { - plugin = "akka.persistence.snapshot-store.local" + plugin = "pekko.persistence.snapshot-store.local" local.dir = target/snapshots-SharedLeveldbJournalSpec } } @@ -90,7 +90,7 @@ class SharedLeveldbJournalSpec extends AkkaSpec(SharedLeveldbJournalSpec.config) val probeA = new TestProbe(systemA) val probeB = new TestProbe(systemB) - val storeConfig = system.settings.config.getConfig("akka.persistence.journal.leveldb-shared") + val storeConfig = system.settings.config.getConfig("pekko.persistence.journal.leveldb-shared") @nowarn val sharedLeveldbStoreCls = classOf[SharedLeveldbStore] system.actorOf(Props(sharedLeveldbStoreCls, storeConfig), "store") diff --git a/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/serialization/SerializerSpec.scala b/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/serialization/SerializerSpec.scala index 84f0c2e1ac..920dead0ce 100644 --- 
a/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/serialization/SerializerSpec.scala +++ b/akka-persistence-shared/src/test/scala/org/apache/pekko/persistence/serialization/SerializerSpec.scala @@ -24,7 +24,7 @@ import pekko.util.ByteString.UTF_8 object SerializerSpecConfigs { val customSerializers = ConfigFactory.parseString(""" - akka.actor { + pekko.actor { serializers { my-payload = "org.apache.pekko.persistence.serialization.MyPayloadSerializer" my-payload2 = "org.apache.pekko.persistence.serialization.MyPayload2Serializer" @@ -45,12 +45,12 @@ object SerializerSpecConfigs { """) val remote = ConfigFactory.parseString(""" - akka { + pekko { actor { provider = remote } remote { - enabled-transports = ["akka.remote.classic.netty.tcp"] + enabled-transports = ["pekko.remote.classic.netty.tcp"] classic.netty.tcp { hostname = "127.0.0.1" port = 0 diff --git a/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalPerfSpec.scala b/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalPerfSpec.scala index 9d757fdb40..a50e8fd8b7 100644 --- a/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalPerfSpec.scala +++ b/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalPerfSpec.scala @@ -98,7 +98,7 @@ object JournalPerfSpec { } private val cmdSerializerConfig = ConfigFactory.parseString(s""" - akka.actor { + pekko.actor { serializers { JournalPerfSpec = "${classOf[CmdSerializer].getName}" } diff --git a/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalSpec.scala b/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalSpec.scala index b679ee8a37..ec7dd676de 100644 --- a/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalSpec.scala +++ b/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/journal/JournalSpec.scala @@ -18,8 +18,8 @@ import 
pekko.util.unused object JournalSpec { val config: Config = ConfigFactory.parseString(s""" - akka.persistence.publish-plugin-commands = on - akka.actor { + pekko.persistence.publish-plugin-commands = on + pekko.actor { serializers { persistence-tck-test = "${classOf[TestSerializer].getName}" } diff --git a/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/snapshot/SnapshotStoreSpec.scala b/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/snapshot/SnapshotStoreSpec.scala index 700be19657..c6f9aaf77b 100644 --- a/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/snapshot/SnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/main/scala/org/apache/pekko/persistence/snapshot/SnapshotStoreSpec.scala @@ -18,8 +18,8 @@ import pekko.testkit.TestProbe object SnapshotStoreSpec { val config: Config = ConfigFactory.parseString(s""" - akka.persistence.publish-plugin-commands = on - akka.actor { + pekko.persistence.publish-plugin-commands = on + pekko.actor { serializers { persistence-tck-test = "${classOf[TestSerializer].getName}" } diff --git a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/PluginCleanup.scala b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/PluginCleanup.scala index 3ed948fefc..1212b85874 100644 --- a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/PluginCleanup.scala +++ b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/PluginCleanup.scala @@ -11,7 +11,7 @@ import org.scalatest.BeforeAndAfterAll trait PluginCleanup extends BeforeAndAfterAll { self: PluginSpec => val storageLocations = - List("akka.persistence.journal.leveldb.dir", "akka.persistence.snapshot-store.local.dir").map(s => + List("pekko.persistence.journal.leveldb.dir", "pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) override def beforeAll(): Unit = { diff --git 
a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala index e190beeed8..cf55bab0cc 100644 --- a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala +++ b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala @@ -14,9 +14,9 @@ class LeveldbJournalJavaSpec "leveldb", "LeveldbJournalJavaSpec", extraConfig = Some(""" - akka.persistence.journal.leveldb.native = off - akka.actor.allow-java-serialization = off - akka.actor.warn-about-java-serializer-usage = on + pekko.persistence.journal.leveldb.native = off + pekko.actor.allow-java-serialization = off + pekko.actor.warn-about-java-serializer-usage = on """))) with PluginCleanup { diff --git a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala index 96ac466c7c..66a9f8f3ff 100644 --- a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala +++ b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala @@ -14,9 +14,9 @@ class LeveldbJournalNativeSpec "leveldb", "LeveldbJournalNativeSpec", extraConfig = Some(""" - akka.persistence.journal.leveldb.native = on - akka.actor.allow-java-serialization = off - akka.actor.warn-about-java-serializer-usage = on + pekko.persistence.journal.leveldb.native = on + pekko.actor.allow-java-serialization = off + pekko.actor.warn-about-java-serializer-usage = on """))) with PluginCleanup { diff --git 
a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala index 2cb53d40f2..cbb4559ee7 100644 --- a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala +++ b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala @@ -14,9 +14,9 @@ class LeveldbJournalNoAtomicPersistMultipleEventsSpec "leveldb", "LeveldbJournalNoAtomicPersistMultipleEventsSpec", extraConfig = Some(""" - akka.persistence.journal.leveldb.native = off - akka.actor.allow-java-serialization = off - akka.actor.warn-about-java-serializer-usage = on + pekko.persistence.journal.leveldb.native = off + pekko.actor.allow-java-serialization = off + pekko.actor.warn-about-java-serializer-usage = on """))) with PluginCleanup { diff --git a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/snapshot/local/LocalSnapshotStoreSpec.scala b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/snapshot/local/LocalSnapshotStoreSpec.scala index 49e76f1d6a..64b3420fc9 100644 --- a/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/snapshot/local/LocalSnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/test/scala/org/apache/pekko/persistence/snapshot/local/LocalSnapshotStoreSpec.scala @@ -15,9 +15,9 @@ class LocalSnapshotStoreSpec extends SnapshotStoreSpec( config = ConfigFactory.parseString(""" - akka.test.timefactor = 3 - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/snapshots" + pekko.test.timefactor = 3 + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + 
pekko.persistence.snapshot-store.local.dir = "target/snapshots" """)) with PluginCleanup { diff --git a/akka-persistence-testkit/src/main/resources/reference.conf b/akka-persistence-testkit/src/main/resources/reference.conf index 743553faf1..d43bc305a0 100644 --- a/akka-persistence-testkit/src/main/resources/reference.conf +++ b/akka-persistence-testkit/src/main/resources/reference.conf @@ -1,11 +1,11 @@ ################################################## -# Akka Persistence Testkit Reference Config File # +# Pekko Persistence Testkit Reference Config File # ################################################## # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -akka.persistence.testkit { +pekko.persistence.testkit { # configuration for persistence testkit for events events { @@ -29,10 +29,10 @@ akka.persistence.testkit { } -akka.persistence.testkit.query { +pekko.persistence.testkit.query { class = "org.apache.pekko.persistence.testkit.query.PersistenceTestKitReadJournalProvider" } -akka.persistence.testkit.state { +pekko.persistence.testkit.state { class = "org.apache.pekko.persistence.testkit.state.PersistenceTestKitDurableStateStoreProvider" } diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/PersistenceTestKitPlugin.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/PersistenceTestKitPlugin.scala index 03e4d4f4e7..90652f4672 100644 --- a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/PersistenceTestKitPlugin.scala +++ b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/PersistenceTestKitPlugin.scala @@ -82,7 +82,7 @@ class PersistenceTestKitPlugin(@unused cfg: Config, cfgPath: String) extends Asy object PersistenceTestKitPlugin { - val PluginId = "akka.persistence.testkit.journal" + val PluginId = "pekko.persistence.testkit.journal" import 
pekko.util.ccompat.JavaConverters._ @@ -90,7 +90,7 @@ object PersistenceTestKitPlugin { val config: Config = ConfigFactory.parseMap( Map( - "akka.persistence.journal.plugin" -> PluginId, + "pekko.persistence.journal.plugin" -> PluginId, s"$PluginId.class" -> s"${classOf[PersistenceTestKitPlugin].getName}").asJava) private[testkit] case class Write(persistenceId: String, toSequenceNr: Long) @@ -123,7 +123,7 @@ class PersistenceTestKitSnapshotPlugin extends SnapshotStore { object PersistenceTestKitSnapshotPlugin { - val PluginId = "akka.persistence.testkit.snapshotstore.pluginid" + val PluginId = "pekko.persistence.testkit.snapshotstore.pluginid" import pekko.util.ccompat.JavaConverters._ @@ -131,7 +131,7 @@ object PersistenceTestKitSnapshotPlugin { val config: Config = ConfigFactory.parseMap( Map( - "akka.persistence.snapshot-store.plugin" -> PluginId, + "pekko.persistence.snapshot-store.plugin" -> PluginId, s"$PluginId.class" -> classOf[PersistenceTestKitSnapshotPlugin].getName, s"$PluginId.snapshot-is-optional" -> false // fallback isn't used by the testkit ).asJava) @@ -140,11 +140,11 @@ object PersistenceTestKitSnapshotPlugin { object PersistenceTestKitDurableStateStorePlugin { - val PluginId = "akka.persistence.testkit.state" + val PluginId = "pekko.persistence.testkit.state" import pekko.util.ccompat.JavaConverters._ def getInstance() = this - val config: Config = ConfigFactory.parseMap(Map("akka.persistence.state.plugin" -> PluginId).asJava) + val config: Config = ConfigFactory.parseMap(Map("pekko.persistence.state.plugin" -> PluginId).asJava) } diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala index 777a2982eb..1272b48962 100644 --- 
a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala +++ b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala @@ -89,8 +89,8 @@ import pekko.stream.scaladsl.Sink import EventSourcedBehaviorTestKitImpl._ private def system: ActorSystem[_] = actorTestKit.system - if (system.settings.config.getBoolean("akka.persistence.testkit.events.serialize") || - system.settings.config.getBoolean("akka.persistence.testkit.snapshots.serialize")) { + if (system.settings.config.getBoolean("pekko.persistence.testkit.events.serialize") || + system.settings.config.getBoolean("pekko.persistence.testkit.snapshots.serialize")) { system.log.warn( "Persistence TestKit serialization enabled when using EventSourcedBehaviorTestKit, this is not intended. " + "make sure you create the system used in the test with the config from EventSourcedBehaviorTestKit.config " + @@ -101,7 +101,7 @@ import pekko.stream.scaladsl.Sink persistenceTestKit.clearAll() override val snapshotTestKit: Option[SnapshotTestKit] = - if (system.settings.config.getString("akka.persistence.snapshot-store.plugin") != "") + if (system.settings.config.getString("pekko.persistence.snapshot-store.plugin") != "") Some(SnapshotTestKit(system)) else None snapshotTestKit.foreach(_.clearAll()) diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/javadsl/PersistenceTestKitReadJournal.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/javadsl/PersistenceTestKitReadJournal.scala index e7fba45d1e..c1f0bda712 100644 --- a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/javadsl/PersistenceTestKitReadJournal.scala +++ b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/javadsl/PersistenceTestKitReadJournal.scala @@ -21,7 +21,7 @@ import 
pekko.stream.javadsl.Source import pekko.persistence.testkit.query.scaladsl object PersistenceTestKitReadJournal { - val Identifier = "akka.persistence.testkit.query" + val Identifier = "pekko.persistence.testkit.query" } final class PersistenceTestKitReadJournal(delegate: scaladsl.PersistenceTestKitReadJournal) diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala index 1c78b64753..a602e61c5e 100644 --- a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala +++ b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala @@ -32,7 +32,7 @@ import pekko.persistence.typed.PersistenceId import scala.collection.immutable object PersistenceTestKitReadJournal { - val Identifier = "akka.persistence.testkit.query" + val Identifier = "pekko.persistence.testkit.query" } final class PersistenceTestKitReadJournal(system: ExtendedActorSystem, @unused config: Config, configPath: String) diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala index f6e650a15d..84110db598 100644 --- a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala @@ -36,8 +36,8 @@ object EventSourcedBehaviorTestKit { * journal and snapshot storage. 
*/ val config: Config = ConfigFactory.parseString(""" - akka.persistence.testkit.events.serialize = off - akka.persistence.testkit.snapshots.serialize = off + pekko.persistence.testkit.events.serialize = off + pekko.persistence.testkit.snapshots.serialize = off """).withFallback(PersistenceTestKitPlugin.config).withFallback(PersistenceTestKitSnapshotPlugin.config) object SerializationSettings { diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/PersistenceTestKit.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/PersistenceTestKit.scala index bcd9c4d4dd..c5506d8b68 100644 --- a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/PersistenceTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/scaladsl/PersistenceTestKit.scala @@ -405,7 +405,7 @@ object SnapshotTestKit { object Settings extends ExtensionId[Settings] { - val configPath = "akka.persistence.testkit.snapshots" + val configPath = "pekko.persistence.testkit.snapshots" override def createExtension(system: ExtendedActorSystem): Settings = new Settings(system.settings.config.getConfig(configPath)) @@ -519,7 +519,7 @@ object PersistenceTestKit { object Settings extends ExtensionId[Settings] { - val configPath = "akka.persistence.testkit.events" + val configPath = "pekko.persistence.testkit.events" override def get(system: ActorSystem): Settings = super.get(system) diff --git a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala index 22049a5198..8d8f40c63e 100644 --- a/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala +++ 
b/akka-persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala @@ -32,7 +32,7 @@ import pekko.stream.OverflowStrategy import scala.collection.immutable object PersistenceTestKitDurableStateStore { - val Identifier = "akka.persistence.testkit.state" + val Identifier = "pekko.persistence.testkit.state" } class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem) diff --git a/akka-persistence-testkit/src/test/resources/application.conf b/akka-persistence-testkit/src/test/resources/application.conf index 9f9f06e2c1..0a0043059b 100644 --- a/akka-persistence-testkit/src/test/resources/application.conf +++ b/akka-persistence-testkit/src/test/resources/application.conf @@ -1,4 +1,4 @@ -akka.persistence.testkit { +pekko.persistence.testkit { events { assert-timeout = 500ms assert-poll-interval = 40millis diff --git a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/CommonUtils.scala b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/CommonUtils.scala index 2dddcafe92..8567c9fe16 100644 --- a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/CommonUtils.scala +++ b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/CommonUtils.scala @@ -30,10 +30,10 @@ trait CommonUtils extends AnyWordSpecLike with TestKitBase { Map( // testing serialization of the events when persisting in the storage // using default java serializers for convenience - "akka.actor.allow-java-serialization" -> true, - "akka.persistence.testkit.events.serialize" -> serializeMessages, - "akka.persistence.testkit.snapshots.serialize" -> serializeSnapshots).asJava)) - .withFallback(ConfigFactory.parseString("akka.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]")) + "pekko.actor.allow-java-serialization" -> true, + "pekko.persistence.testkit.events.serialize" -> serializeMessages, + 
"pekko.persistence.testkit.snapshots.serialize" -> serializeSnapshots).asJava)) + .withFallback(ConfigFactory.parseString("pekko.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]")) .withFallback(ConfigFactory.defaultApplication())) } diff --git a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/query/EventsByPersistenceIdSpec.scala b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/query/EventsByPersistenceIdSpec.scala index 36c2618e0a..e238a9919a 100644 --- a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/query/EventsByPersistenceIdSpec.scala +++ b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/query/EventsByPersistenceIdSpec.scala @@ -22,9 +22,9 @@ import scala.concurrent.duration._ object EventsByPersistenceIdSpec { val config = PersistenceTestKitPlugin.config.withFallback( ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.persistence.testkit.events.serialize = off + pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.persistence.testkit.events.serialize = off """)) case class Command(evt: String, ack: ActorRef[Done]) diff --git a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala index 80e139bc22..564d27ee42 100644 --- a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala +++ b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala @@ -15,8 +15,8 @@ import org.scalatest.wordspec.AnyWordSpecLike class 
EventSourcedBehaviorNoSnapshotTestKitSpec extends ScalaTestWithActorTestKit(ConfigFactory.parseString(""" - akka.persistence.testkit.events.serialize = off - akka.persistence.testkit.snapshots.serialize = off + pekko.persistence.testkit.events.serialize = off + pekko.persistence.testkit.snapshots.serialize = off """).withFallback(PersistenceTestKitPlugin.config)) with AnyWordSpecLike with LogCapturing { diff --git a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/MultipleJournalsSpec.scala b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/MultipleJournalsSpec.scala index a56d41e1e5..218b95135d 100644 --- a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/MultipleJournalsSpec.scala +++ b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/scaladsl/MultipleJournalsSpec.scala @@ -50,11 +50,11 @@ object MultipleJournalsSpec { journal1 { # journal and query expected to be next to each other under config path journal.class = "${classOf[PersistenceTestKitPlugin].getName}" - query = $${akka.persistence.testkit.query} + query = $${pekko.persistence.testkit.query} } journal2 { journal.class = "${classOf[PersistenceTestKitPlugin].getName}" - query = $${akka.persistence.testkit.query} + query = $${pekko.persistence.testkit.query} } """).withFallback(ConfigFactory.load()).resolve() diff --git a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStoreSpec.scala b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStoreSpec.scala index c084c3347f..5d2f7725f0 100644 --- a/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStoreSpec.scala +++ 
b/akka-persistence-testkit/src/test/scala/org/apache/pekko/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStoreSpec.scala @@ -21,7 +21,7 @@ import org.scalatest.wordspec.AnyWordSpecLike object PersistenceTestKitDurableStateStoreSpec { val config = PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(""" - akka.loglevel = DEBUG + pekko.loglevel = DEBUG """)) case class Record(id: Int, name: String) } diff --git a/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/ReplicatedEventSourcingTest.java b/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/ReplicatedEventSourcingTest.java index 6621dfdf5e..a20922f4ef 100644 --- a/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/ReplicatedEventSourcingTest.java +++ b/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/ReplicatedEventSourcingTest.java @@ -146,8 +146,8 @@ public class ReplicatedEventSourcingTest extends JUnitSuite { public static final TestKitJunitResource testKit = new TestKitJunitResource( ConfigFactory.parseString( - "akka.loglevel = INFO\n" - + "akka.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]") + "pekko.loglevel = INFO\n" + + "pekko.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]") .withFallback(PersistenceTestKitPlugin.getInstance().config())); @Rule public final LogCapturing logCapturing = new LogCapturing(); diff --git a/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehaviorJavaDslTest.java b/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehaviorJavaDslTest.java index 4cdb9b7753..6a082b68ee 100644 --- a/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehaviorJavaDslTest.java +++ 
b/akka-persistence-typed-tests/src/test/java/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehaviorJavaDslTest.java @@ -48,8 +48,8 @@ public class EventSourcedBehaviorJavaDslTest extends JUnitSuite { public static final TestKitJunitResource testKit = new TestKitJunitResource( ConfigFactory.parseString( - "akka.loglevel = INFO\n" - + "akka.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]") + "pekko.loglevel = INFO\n" + + "pekko.loggers = [\"org.apache.pekko.testkit.TestEventListener\"]") .withFallback(PersistenceTestKitPlugin.getInstance().config()) .withFallback(PersistenceTestKitSnapshotPlugin.config())); diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/EventSourcedBehaviorLoggingSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/EventSourcedBehaviorLoggingSpec.scala index 0dcb43d6e9..c56c5c0755 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/EventSourcedBehaviorLoggingSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/EventSourcedBehaviorLoggingSpec.scala @@ -127,7 +127,7 @@ class EventSourcedBehaviorLoggingInternalLoggerSpec object EventSourcedBehaviorLoggingContextLoggerSpec { val config = ConfigFactory - .parseString("akka.persistence.typed.use-context-logger-for-internal-logging = true") + .parseString("pekko.persistence.typed.use-context-logger-for-internal-logging = true") .withFallback(PersistenceTestKitPlugin.config) } class EventSourcedBehaviorLoggingContextLoggerSpec diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/MultiJournalReplicationSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/MultiJournalReplicationSpec.scala index 5c197345de..5e8339d1b0 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/MultiJournalReplicationSpec.scala +++ 
b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/MultiJournalReplicationSpec.scala @@ -56,11 +56,11 @@ object MultiJournalReplicationSpec { def separateJournalsConfig: Config = ConfigFactory.parseString(s""" journal1 { journal.class = "${classOf[PersistenceTestKitPlugin].getName}" - query = $${akka.persistence.testkit.query} + query = $${pekko.persistence.testkit.query} } journal2 { journal.class = "${classOf[PersistenceTestKitPlugin].getName}" - query = $${akka.persistence.testkit.query} + query = $${pekko.persistence.testkit.query} } """).withFallback(ConfigFactory.load()).resolve() diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala index e8d54b5eae..cf2469bebc 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala @@ -71,9 +71,9 @@ class ChaosJournal extends InmemJournal { object EventSourcedBehaviorFailureSpec { val conf: Config = ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.persistence.journal.plugin = "failure-journal" - failure-journal = $${akka.persistence.journal.inmem} + pekko.loglevel = INFO + pekko.persistence.journal.plugin = "failure-journal" + failure-journal = $${pekko.persistence.journal.inmem} failure-journal { class = "org.apache.pekko.persistence.typed.scaladsl.ChaosJournal" } diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala index 42b69599e3..38046064ca 
100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala @@ -24,9 +24,9 @@ object EventSourcedBehaviorInterceptorSpec { val journalId = "event-sourced-behavior-interceptor-spec" def config: Config = ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on + pekko.loglevel = INFO + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on """) def testBehavior(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[String] = diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala index 5f06c0ba97..fd2310a173 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala @@ -29,10 +29,10 @@ object EventSourcedBehaviorRecoveryTimeoutSpec { SteppingInmemJournal .config(journalId) .withFallback(ConfigFactory.parseString(""" - akka.persistence.journal.stepping-inmem.recovery-event-timeout=1s + pekko.persistence.journal.stepping-inmem.recovery-event-timeout=1s """)) .withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """)) def testBehavior(persistenceId: PersistenceId, probe: ActorRef[AnyRef]): Behavior[String] = diff --git 
a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala index d073364eac..12ce50d84a 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala @@ -79,10 +79,10 @@ object EventSourcedBehaviorSpec { // also used from PersistentActorTest, EventSourcedBehaviorWatchSpec def conf: Config = PersistenceTestKitPlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - # akka.persistence.typed.log-stashing = on - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/typed-persistence-${UUID.randomUUID().toString}" + pekko.loglevel = INFO + # pekko.persistence.typed.log-stashing = on + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/typed-persistence-${UUID.randomUUID().toString}" slow-snapshot-store.class = "${classOf[SlowInMemorySnapshotStore].getName}" short-recovery-timeout { @@ -670,7 +670,7 @@ class EventSourcedBehaviorSpec val testkit2 = ActorTestKit( ActorTestKitBase.testNameFromCallStack(), ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" """)) try { LoggingTestKit @@ -692,7 +692,7 @@ class EventSourcedBehaviorSpec val testkit2 = ActorTestKit( ActorTestKitBase.testNameFromCallStack(), ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" """)) try { LoggingTestKit 
diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala index e8edb1e322..355259c21a 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala @@ -32,13 +32,13 @@ import scala.concurrent.duration._ object EventSourcedBehaviorStashSpec { def conf: Config = ConfigFactory.parseString(s""" - #akka.loglevel = DEBUG - #akka.persistence.typed.log-stashing = on - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.plugin = "failure-journal" + #pekko.loglevel = DEBUG + #pekko.persistence.typed.log-stashing = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.plugin = "failure-journal" # tune it down a bit so we can hit limit - akka.persistence.typed.stash-capacity = 500 - failure-journal = $${akka.persistence.journal.inmem} + pekko.persistence.typed.stash-capacity = 500 + failure-journal = $${pekko.persistence.journal.inmem} failure-journal { class = "org.apache.pekko.persistence.typed.scaladsl.ChaosJournal" } @@ -573,7 +573,7 @@ class EventSourcedBehaviorStashSpec c ! "start-stashing" - val limit = system.settings.config.getInt("akka.persistence.typed.stash-capacity") + val limit = system.settings.config.getInt("pekko.persistence.typed.stash-capacity") LoggingTestKit.warn("Stash buffer is full, dropping message").expect { (0 to limit).foreach { n => c ! 
s"cmd-$n" // limit triggers overflow @@ -597,7 +597,7 @@ class EventSourcedBehaviorStashSpec val failStashTestKit = ActorTestKit( "EventSourcedBehaviorStashSpec-stash-overflow-fail", ConfigFactory - .parseString("akka.persistence.typed.stash-overflow-strategy=fail") + .parseString("pekko.persistence.typed.stash-overflow-strategy=fail") .withFallback(EventSourcedBehaviorStashSpec.conf)) try { val probe = failStashTestKit.createTestProbe[AnyRef]() @@ -623,7 +623,7 @@ class EventSourcedBehaviorStashSpec LoggingTestKit .error[StashOverflowException] .expect { - val limit = system.settings.config.getInt("akka.persistence.typed.stash-capacity") + val limit = system.settings.config.getInt("pekko.persistence.typed.stash-capacity") (0 to limit).foreach { n => c ! s"cmd-$n" // limit triggers overflow } diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala index 4c30ce68c9..111c476908 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala @@ -78,7 +78,7 @@ object EventSourcedEventAdapterSpec { class EventSourcedEventAdapterSpec extends ScalaTestWithActorTestKit(ConfigFactory.parseString(""" - akka.persistence.testkit.events.serialize = true""").withFallback(PersistenceTestKitPlugin.config)) + pekko.persistence.testkit.events.serialize = true""").withFallback(PersistenceTestKitPlugin.config)) with AnyWordSpecLike with LogCapturing { import EventSourcedBehaviorSpec._ diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala 
b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala index e98b2f7b4c..1aaf256799 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala @@ -19,9 +19,9 @@ import org.scalatest.wordspec.AnyWordSpecLike object EventSourcedSequenceNumberSpec { private val conf = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on - akka.persistence.snapshot-store.plugin = "slow-snapshot-store" + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on + pekko.persistence.snapshot-store.plugin = "slow-snapshot-store" slow-snapshot-store.class = "${classOf[SlowInMemorySnapshotStore].getName}" """) diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala index e5517f6278..95abfcc7e8 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala @@ -44,7 +44,7 @@ object EventSourcedStashOverflowSpec { def conf = SteppingInmemJournal.config("EventSourcedStashOverflow").withFallback(ConfigFactory.parseString(s""" - akka.persistence { + pekko.persistence { typed { stash-capacity = 1000 # enough to fail on stack size stash-overflow-strategy = "drop" @@ -71,7 +71,7 @@ class EventSourcedStashOverflowSpec val journal = 
SteppingInmemJournal.getRef("EventSourcedStashOverflow") val droppedMessageProbe = testKit.createDroppedMessageProbe() - val stashCapacity = testKit.config.getInt("akka.persistence.typed.stash-capacity") + val stashCapacity = testKit.config.getInt("pekko.persistence.typed.stash-capacity") for (_ <- 0 to (stashCapacity * 2)) { es.tell(EventSourcedStringList.DoNothing(probe.ref)) diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/NullEmptyStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/NullEmptyStateSpec.scala index 0fb537b482..1230d57499 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/NullEmptyStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/NullEmptyStateSpec.scala @@ -17,8 +17,8 @@ import org.scalatest.wordspec.AnyWordSpecLike object NullEmptyStateSpec { private val conf = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on """) } diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala index 5f442f3ae5..8caac73e37 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala @@ -38,17 +38,17 @@ object OptionalSnapshotStoreSpec { }).snapshotWhen { case _ => true } def persistentBehaviorWithSnapshotPlugin(probe: TestProbe[State]) 
= - persistentBehavior(probe).withSnapshotPluginId("akka.persistence.snapshot-store.local") + persistentBehavior(probe).withSnapshotPluginId("pekko.persistence.snapshot-store.local") } class OptionalSnapshotStoreSpec extends ScalaTestWithActorTestKit(s""" - akka.persistence.publish-plugin-commands = on - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on + pekko.persistence.publish-plugin-commands = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on # snapshot store plugin is NOT defined, things should still work - akka.persistence.snapshot-store.local.dir = "target/snapshots-${classOf[OptionalSnapshotStoreSpec].getName}/" + pekko.persistence.snapshot-store.local.dir = "target/snapshots-${classOf[OptionalSnapshotStoreSpec].getName}/" """) with AnyWordSpecLike with LogCapturing { import OptionalSnapshotStoreSpec._ diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PerformanceSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PerformanceSpec.scala index 95e5837db2..349061743e 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PerformanceSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PerformanceSpec.scala @@ -27,11 +27,11 @@ object PerformanceSpec { val config = """ - akka.persistence.performance.cycles.load = 100 + pekko.persistence.performance.cycles.load = 100 # more accurate throughput measurements - #akka.persistence.performance.cycles.load = 10000 + #pekko.persistence.performance.cycles.load = 10000 # no stash capacity limit - akka.persistence.typed.stash-capacity = 1000000 + pekko.persistence.typed.stash-capacity = 1000000 """ sealed trait Command @@ -117,8 +117,8 @@ class PerformanceSpec 
PersistenceTestKitPlugin.config .withFallback(PersistenceTestKitSnapshotPlugin.config) .withFallback(ConfigFactory.parseString(s""" - akka.persistence.publish-plugin-commands = on - akka.actor.testkit.typed.single-expect-default = 10s + pekko.persistence.publish-plugin-commands = on + pekko.actor.testkit.typed.single-expect-default = 10s """)) .withFallback(ConfigFactory.parseString(PerformanceSpec.config))) with AnyWordSpecLike @@ -126,7 +126,7 @@ class PerformanceSpec import PerformanceSpec._ - val loadCycles = system.settings.config.getInt("akka.persistence.performance.cycles.load") + val loadCycles = system.settings.config.getInt("pekko.persistence.performance.cycles.load") def stressPersistentActor( persistentActor: ActorRef[Command], diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PrimitiveStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PrimitiveStateSpec.scala index 5a1b7d433b..a97017b1c6 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PrimitiveStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/PrimitiveStateSpec.scala @@ -16,8 +16,8 @@ import org.scalatest.wordspec.AnyWordSpecLike object PrimitiveStateSpec { private val conf = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on """) } diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala index 459b5be384..0bb985a3b1 100644 --- 
a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala @@ -22,8 +22,8 @@ import java.util.concurrent.atomic.AtomicInteger object SnapshotMutableStateSpec { def conf: Config = PersistenceTestKitPlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.persistence.snapshot-store.plugin = "slow-snapshot-store" + pekko.loglevel = INFO + pekko.persistence.snapshot-store.plugin = "slow-snapshot-store" slow-snapshot-store.class = "${classOf[SlowInMemorySnapshotStore].getName}" """)) diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotRecoveryWithEmptyJournalSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotRecoveryWithEmptyJournalSpec.scala index 3b2c1bc0a2..885284a76f 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotRecoveryWithEmptyJournalSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotRecoveryWithEmptyJournalSpec.scala @@ -27,11 +27,11 @@ object SnapshotRecoveryWithEmptyJournalSpec { val survivingSnapshotPath = s"target/survivingSnapshotPath-${UUID.randomUUID().toString}" def conf: Config = PersistenceTestKitPlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "${SnapshotRecoveryWithEmptyJournalSpec.survivingSnapshotPath}" - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.loglevel = INFO + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + 
pekko.persistence.snapshot-store.local.dir = "${SnapshotRecoveryWithEmptyJournalSpec.survivingSnapshotPath}" + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) object TestActor { diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala index 880ce969ab..1f79cd05f7 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala @@ -22,7 +22,7 @@ import pekko.persistence.testkit.PersistenceTestKitDurableStateStorePlugin object DurableStateBehaviorInterceptorSpec { def conf: Config = PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """)) def testBehavior(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[String] = diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorReplySpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorReplySpec.scala index 007b8841de..62ce1dbb45 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorReplySpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorReplySpec.scala @@ -24,7 +24,7 @@ import pekko.persistence.testkit.PersistenceTestKitDurableStateStorePlugin object DurableStateBehaviorReplySpec { def conf: Config = 
PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """)) sealed trait Command[ReplyMessage] extends CborSerializable diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorTimersSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorTimersSpec.scala index 1eacd48965..cb270da0c9 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorTimersSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateBehaviorTimersSpec.scala @@ -22,7 +22,7 @@ import pekko.persistence.testkit.PersistenceTestKitDurableStateStorePlugin object DurableStateBehaviorTimersSpec { def conf: Config = PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """)) def testBehavior(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[String] = diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala index 9fbc5316fa..7c02f5b556 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala @@ -22,7 +22,7 @@ import pekko.persistence.typed.state.RecoveryCompleted object DurableStateRevisionSpec { def conf: Config = PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + 
pekko.loglevel = INFO """)) } diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/NullEmptyStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/NullEmptyStateSpec.scala index 8c3aa68058..0ec58f0ba5 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/NullEmptyStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/NullEmptyStateSpec.scala @@ -19,7 +19,7 @@ import pekko.persistence.testkit.PersistenceTestKitDurableStateStorePlugin object NullEmptyStateSpec { def conf: Config = PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """)) } diff --git a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/PrimitiveStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/PrimitiveStateSpec.scala index c688fb8746..103a4a8644 100644 --- a/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/PrimitiveStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/org/apache/pekko/persistence/typed/state/scaladsl/PrimitiveStateSpec.scala @@ -18,7 +18,7 @@ import pekko.persistence.testkit.PersistenceTestKitDurableStateStorePlugin object PrimitiveStateSpec { def conf: Config = PersistenceTestKitDurableStateStorePlugin.config.withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO + pekko.loglevel = INFO """)) } diff --git a/akka-persistence-typed/src/main/resources/reference.conf b/akka-persistence-typed/src/main/resources/reference.conf index 1f02e84d32..5f2c6d3564 100644 --- a/akka-persistence-typed/src/main/resources/reference.conf +++ b/akka-persistence-typed/src/main/resources/reference.conf @@ -1,4 +1,4 @@ 
-akka.actor { +pekko.actor { serialization-identifiers."org.apache.pekko.persistence.typed.serialization.ReplicatedEventSourcingSerializer" = 40 @@ -16,7 +16,7 @@ akka.actor { } } -akka.persistence.typed { +pekko.persistence.typed { # Persistent actors stash while recovering or persisting events, # this setting configures the default capacity of this stash. @@ -47,7 +47,7 @@ akka.persistence.typed { use-context-logger-for-internal-logging = false } -akka.reliable-delivery { +pekko.reliable-delivery { producer-controller { event-sourced-durable-queue { # Max duration for the exponential backoff for persist failures. @@ -66,11 +66,11 @@ akka.reliable-delivery { cleanup-unused-after = 3600s # The journal plugin to use, by default it will use the plugin configured by - # `akka.persistence.journal.plugin`. + # `pekko.persistence.journal.plugin`. journal-plugin-id = "" # The journal plugin to use, by default it will use the plugin configured by - # `akka.persistence.snapshot-store.plugin`. + # `pekko.persistence.snapshot-store.plugin`. snapshot-plugin-id = "" } } diff --git a/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala b/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala index 427cdadde0..0bf84c0ff8 100644 --- a/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala +++ b/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala @@ -40,15 +40,15 @@ object EventSourcedProducerQueue { object Settings { /** - * Scala API: Factory method from config `akka.reliable-delivery.producer-controller.event-sourced-durable-queue` + * Scala API: Factory method from config `pekko.reliable-delivery.producer-controller.event-sourced-durable-queue` * of the `ActorSystem`. 
*/ def apply(system: ActorSystem[_]): Settings = - apply(system.settings.config.getConfig("akka.reliable-delivery.producer-controller.event-sourced-durable-queue")) + apply(system.settings.config.getConfig("pekko.reliable-delivery.producer-controller.event-sourced-durable-queue")) /** * Scala API: Factory method from Config corresponding to - * `akka.reliable-delivery.producer-controller.event-sourced-durable-queue`. + * `pekko.reliable-delivery.producer-controller.event-sourced-durable-queue`. */ def apply(config: Config): Settings = { new Settings( @@ -62,7 +62,7 @@ object EventSourcedProducerQueue { } /** - * Java API: Factory method from config `akka.reliable-delivery.producer-controller.event-sourced-durable-queue` + * Java API: Factory method from config `pekko.reliable-delivery.producer-controller.event-sourced-durable-queue` * of the `ActorSystem`. */ def create(system: ActorSystem[_]): Settings = @@ -70,7 +70,7 @@ object EventSourcedProducerQueue { /** * Java API: Factory method from Config corresponding to - * `akka.reliable-delivery.producer-controller.event-sourced-durable-queue`. + * `pekko.reliable-delivery.producer-controller.event-sourced-durable-queue`. 
*/ def create(config: Config): Settings = apply(config) diff --git a/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/internal/EventSourcedSettings.scala b/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/internal/EventSourcedSettings.scala index 36e48a1258..a21c56feb4 100644 --- a/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/internal/EventSourcedSettings.scala +++ b/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/internal/EventSourcedSettings.scala @@ -24,7 +24,7 @@ import pekko.persistence.Persistence apply(system.settings.config, journalPluginId, snapshotPluginId) def apply(config: Config, journalPluginId: String, snapshotPluginId: String): EventSourcedSettings = { - val typedConfig = config.getConfig("akka.persistence.typed") + val typedConfig = config.getConfig("pekko.persistence.typed") val stashOverflowStrategy = typedConfig.getString("stash-overflow-strategy").toLowerCase match { case "drop" => StashOverflowStrategy.Drop @@ -58,7 +58,7 @@ import pekko.persistence.Persistence private def journalConfigFor(config: Config, journalPluginId: String): Config = { def defaultJournalPluginId = { - val configPath = config.getString("akka.persistence.journal.plugin") + val configPath = config.getString("pekko.persistence.journal.plugin") Persistence.verifyPluginConfigIsDefined(configPath, "Default journal") configPath } diff --git a/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/internal/DurableStateSettings.scala b/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/internal/DurableStateSettings.scala index 149fc167f3..3b91d2c10c 100644 --- a/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/internal/DurableStateSettings.scala +++ b/akka-persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/internal/DurableStateSettings.scala @@ -25,7 +25,7 @@ import 
pekko.persistence.Persistence apply(system.settings.config, durableStateStorePluginId) def apply(config: Config, durableStateStorePluginId: String): DurableStateSettings = { - val typedConfig = config.getConfig("akka.persistence.typed") + val typedConfig = config.getConfig("pekko.persistence.typed") val stashOverflowStrategy = typedConfig.getString("stash-overflow-strategy").toLowerCase match { case "drop" => StashOverflowStrategy.Drop @@ -56,14 +56,14 @@ import pekko.persistence.Persistence private def durableStateStoreConfigFor(config: Config, pluginId: String): Config = { def defaultPluginId = { - val configPath = config.getString("akka.persistence.state.plugin") + val configPath = config.getString("pekko.persistence.state.plugin") Persistence.verifyPluginConfigIsDefined(configPath, "Default DurableStateStore") configPath } val configPath = if (pluginId == "") defaultPluginId else pluginId Persistence.verifyPluginConfigExists(config, configPath, "DurableStateStore") - config.getConfig(configPath).withFallback(config.getConfig("akka.persistence.state-plugin-fallback")) + config.getConfig(configPath).withFallback(config.getConfig("pekko.persistence.state-plugin-fallback")) } } diff --git a/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/LoggerSourceTest.java b/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/LoggerSourceTest.java index 944c7c7828..18e4fbaaa2 100644 --- a/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/LoggerSourceTest.java +++ b/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/LoggerSourceTest.java @@ -29,8 +29,8 @@ public class LoggerSourceTest extends JUnitSuite { private static final Config config = ConfigFactory.parseString( - "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n"); + "pekko.persistence.journal.plugin = 
\"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n"); @ClassRule public static final TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/NullEmptyStateTest.java b/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/NullEmptyStateTest.java index eb7fd19149..bfac5592af 100644 --- a/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/NullEmptyStateTest.java +++ b/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/NullEmptyStateTest.java @@ -23,8 +23,8 @@ public class NullEmptyStateTest extends JUnitSuite { private static final Config config = ConfigFactory.parseString( - "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n"); + "pekko.persistence.journal.plugin = \"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n"); @ClassRule public static final TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/PrimitiveStateTest.java b/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/PrimitiveStateTest.java index 94a4225e14..80c6b0320a 100644 --- a/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/PrimitiveStateTest.java +++ b/akka-persistence-typed/src/test/java/org/apache/pekko/persistence/typed/javadsl/PrimitiveStateTest.java @@ -23,8 +23,8 @@ public class PrimitiveStateTest extends JUnitSuite { private static final Config config = ConfigFactory.parseString( - "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n"); + "pekko.persistence.journal.plugin = 
\"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n"); @ClassRule public static final TestKitJunitResource testKit = new TestKitJunitResource(config); diff --git a/akka-persistence-typed/src/test/scala/docs/org/apache/pekko/persistence/typed/PersistentFsmToTypedMigrationSpec.scala b/akka-persistence-typed/src/test/scala/docs/org/apache/pekko/persistence/typed/PersistentFsmToTypedMigrationSpec.scala index 9174d10443..a77e9f6e74 100644 --- a/akka-persistence-typed/src/test/scala/docs/org/apache/pekko/persistence/typed/PersistentFsmToTypedMigrationSpec.scala +++ b/akka-persistence-typed/src/test/scala/docs/org/apache/pekko/persistence/typed/PersistentFsmToTypedMigrationSpec.scala @@ -39,11 +39,11 @@ import pekko.actor.testkit.typed.scaladsl.LogCapturing object PersistentFsmToTypedMigrationSpec { // cannot be moved to testkit journals as it requires sharing journal content across actor system instances val config = ConfigFactory.parseString(s""" - akka.actor.allow-java-serialization = on - akka.persistence.journal.leveldb.dir = "target/typed-persistence-${UUID.randomUUID().toString}" - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/typed-persistence-${UUID.randomUUID().toString}" + pekko.actor.allow-java-serialization = on + pekko.persistence.journal.leveldb.dir = "target/typed-persistence-${UUID.randomUUID().toString}" + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/typed-persistence-${UUID.randomUUID().toString}" """) } diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ClusterSingletonPersistenceSpec.scala 
b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ClusterSingletonPersistenceSpec.scala index 2fd43d6c51..9e3a50b21b 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ClusterSingletonPersistenceSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ClusterSingletonPersistenceSpec.scala @@ -22,15 +22,15 @@ import pekko.persistence.typed.scaladsl.EventSourcedBehavior object ClusterSingletonPersistenceSpec { val config = ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = 127.0.0.1 + pekko.actor.provider = cluster + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = 127.0.0.1 - akka.coordinated-shutdown.terminate-actor-system = off - akka.coordinated-shutdown.run-by-actor-system-terminate = off + pekko.coordinated-shutdown.terminate-actor-system = off + pekko.coordinated-shutdown.run-by-actor-system-terminate = off - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" """.stripMargin) sealed trait Command diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ManyRecoveriesSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ManyRecoveriesSpec.scala index 191dc72a18..2023bfe8bd 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ManyRecoveriesSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/ManyRecoveriesSpec.scala @@ -51,15 +51,15 @@ object ManyRecoveriesSpec { } class ManyRecoveriesSpec extends ScalaTestWithActorTestKit(s""" - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { type = Dispatcher executor = "thread-pool-executor" 
thread-pool-executor { fixed-pool-size = 5 } } - akka.persistence.max-concurrent-recoveries = 3 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.persistence.max-concurrent-recoveries = 3 + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" """) with AnyWordSpecLike with LogCapturing { import ManyRecoveriesSpec._ diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/StashingWhenSnapshottingSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/StashingWhenSnapshottingSpec.scala index 79ecb0f38e..e657a64f06 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/StashingWhenSnapshottingSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/StashingWhenSnapshottingSpec.scala @@ -57,13 +57,13 @@ object StashingWhenSnapshottingSpec { slow-snapshot { class = "org.apache.pekko.persistence.typed.StashingWhenSnapshottingSpec$$ControllableSnapshotStore" } - akka.actor.allow-java-serialization = on - akka { + pekko.actor.allow-java-serialization = on + pekko { loglevel = "INFO" persistence { journal { - plugin = "akka.persistence.journal.inmem" + plugin = "pekko.persistence.journal.inmem" auto-start-journals = [] } diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala index 65734e4a61..f452534bac 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala @@ -30,10 +30,10 @@ import pekko.persistence.typed.PersistenceId object EventSourcedProducerQueueSpec { def conf: Config = ConfigFactory.parseString(s""" - 
akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/EventSourcedDurableProducerQueueSpec-${UUID + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/EventSourcedDurableProducerQueueSpec-${UUID .randomUUID() .toString}" """) diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala index a1527e29d1..fd61558539 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala @@ -19,13 +19,13 @@ import pekko.persistence.typed.PersistenceId object ReliableDeliveryWithEventSourcedProducerQueueSpec { def conf: Config = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/ProducerControllerWithEventSourcedProducerQueueSpec-${UUID + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + 
pekko.persistence.snapshot-store.local.dir = "target/ProducerControllerWithEventSourcedProducerQueueSpec-${UUID .randomUUID() .toString}" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) } @@ -176,5 +176,5 @@ class ReliableDeliveryWithEventSourcedProducerQueueSpec(config: Config) class ReliableDeliveryWithEventSourcedProducerQueueChunkedSpec extends ReliableDeliveryWithEventSourcedProducerQueueSpec( ConfigFactory.parseString(""" - akka.reliable-delivery.producer-controller.chunk-large-messages = 1b + pekko.reliable-delivery.producer-controller.chunk-large-messages = 1b """).withFallback(ReliableDeliveryWithEventSourcedProducerQueueSpec.conf)) diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala index fb9a0f8883..c6ad254ae0 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala @@ -23,12 +23,12 @@ import pekko.persistence.typed.PersistenceId object WorkPullingWithEventSourcedProducerQueueSpec { def conf: Config = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/WorkPullingWithEventSourcedProducerQueueSpec-${UUID + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = 
"target/WorkPullingWithEventSourcedProducerQueueSpec-${UUID .randomUUID() .toString}" - akka.reliable-delivery.consumer-controller.flow-control-window = 20 + pekko.reliable-delivery.consumer-controller.flow-control-window = 20 """) } diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/internal/RecoveryPermitterSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/internal/RecoveryPermitterSpec.scala index 52662e086e..57f7298a7d 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/internal/RecoveryPermitterSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/internal/RecoveryPermitterSpec.scala @@ -71,10 +71,10 @@ object RecoveryPermitterSpec { } class RecoveryPermitterSpec extends ScalaTestWithActorTestKit(s""" - akka.persistence.max-concurrent-recoveries = 3 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.journal.inmem.test-serialization = on - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.persistence.max-concurrent-recoveries = 3 + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.journal.inmem.test-serialization = on + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """) with AnyWordSpecLike with LogCapturing { import RecoveryPermitterSpec._ diff --git a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotIsOptionalSpec.scala b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotIsOptionalSpec.scala index 89e86f2652..a497a94042 100644 --- a/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotIsOptionalSpec.scala +++ b/akka-persistence-typed/src/test/scala/org/apache/pekko/persistence/typed/scaladsl/SnapshotIsOptionalSpec.scala @@ -21,10 +21,10 @@ import pekko.serialization.jackson.CborSerializable 
object SnapshotIsOptionalSpec { private val conf: Config = ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/typed-persistence-${UUID.randomUUID().toString}" - akka.persistence.snapshot-store.local.snapshot-is-optional = true + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/typed-persistence-${UUID.randomUUID().toString}" + pekko.persistence.snapshot-store.local.snapshot-is-optional = true """) case class State1(field1: String) extends CborSerializable { @JsonCreator diff --git a/akka-persistence/src/main/resources/reference.conf b/akka-persistence/src/main/resources/reference.conf index 7b1ec967a8..f72f1c824f 100644 --- a/akka-persistence/src/main/resources/reference.conf +++ b/akka-persistence/src/main/resources/reference.conf @@ -1,15 +1,15 @@ ########################################################### -# Akka Persistence Extension Reference Configuration File # +# Pekko Persistence Extension Reference Configuration File # ########################################################### # This is the reference config file that contains all the default settings. # Make your edits in your application.conf in order to override these settings. # Directory of persistence journal and snapshot store plugins is available at the -# Akka Community Projects page https://akka.io/community/ +# Pekko Community Projects page https://akka.io/community/ # Default persistence extension settings. 
-akka.persistence { +pekko.persistence { # When starting many persistent actors at the same time the journal # and its data store is protected from being overloaded by limiting number @@ -45,7 +45,7 @@ akka.persistence { auto-start-snapshot-stores = [] } # used as default-snapshot store if no plugin configured - # (see `akka.persistence.snapshot-store`) + # (see `pekko.persistence.snapshot-store`) no-snapshot-store { class = "org.apache.pekko.persistence.snapshot.NoSnapshotStore" } @@ -102,10 +102,10 @@ akka.persistence { class = "" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" + plugin-dispatcher = "pekko.persistence.dispatchers.default-plugin-dispatcher" # Dispatcher for message replay. - replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher" + replay-dispatcher = "pekko.persistence.dispatchers.default-replay-dispatcher" # Removed: used to be the Maximum size of a persistent message batch written to the journal. # Now this setting is without function, PersistentActor will write as many messages @@ -161,7 +161,7 @@ akka.persistence { class = "" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" + plugin-dispatcher = "pekko.persistence.dispatchers.default-plugin-dispatcher" circuit-breaker { max-failures = 5 @@ -205,14 +205,14 @@ akka.persistence { } # Protobuf serialization for the persistent extension messages. 
-akka.actor { +pekko.actor { serializers { - akka-persistence-message = "org.apache.pekko.persistence.serialization.MessageSerializer" - akka-persistence-snapshot = "org.apache.pekko.persistence.serialization.SnapshotSerializer" + pekko-persistence-message = "org.apache.pekko.persistence.serialization.MessageSerializer" + pekko-persistence-snapshot = "org.apache.pekko.persistence.serialization.SnapshotSerializer" } serialization-bindings { - "org.apache.pekko.persistence.serialization.Message" = akka-persistence-message - "org.apache.pekko.persistence.serialization.Snapshot" = akka-persistence-snapshot + "org.apache.pekko.persistence.serialization.Message" = pekko-persistence-message + "org.apache.pekko.persistence.serialization.Snapshot" = pekko-persistence-snapshot } serialization-identifiers { "org.apache.pekko.persistence.serialization.MessageSerializer" = 7 @@ -226,24 +226,24 @@ akka.actor { ################################################### # In-memory journal plugin. -akka.persistence.journal.inmem { +pekko.persistence.journal.inmem { # Class name of the plugin. class = "org.apache.pekko.persistence.journal.inmem.InmemJournal" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.actor.default-dispatcher" + plugin-dispatcher = "pekko.actor.default-dispatcher" # Turn this on to test serialization of the events test-serialization = off } # Local file system snapshot store plugin. -akka.persistence.snapshot-store.local { +pekko.persistence.snapshot-store.local { # Class name of the plugin. class = "org.apache.pekko.persistence.snapshot.local.LocalSnapshotStore" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" + plugin-dispatcher = "pekko.persistence.dispatchers.default-plugin-dispatcher" # Dispatcher for streaming snapshot IO. 
- stream-dispatcher = "akka.persistence.dispatchers.default-stream-dispatcher" + stream-dispatcher = "pekko.persistence.dispatchers.default-stream-dispatcher" # Storage location of snapshot files. dir = "snapshots" # Number load attempts when recovering from the latest snapshot fails @@ -256,13 +256,13 @@ akka.persistence.snapshot-store.local { # LevelDB journal plugin. # Note: this plugin requires explicit LevelDB dependency, see below. -akka.persistence.journal.leveldb { +pekko.persistence.journal.leveldb { # Class name of the plugin. class = "org.apache.pekko.persistence.journal.leveldb.LeveldbJournal" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" + plugin-dispatcher = "pekko.persistence.dispatchers.default-plugin-dispatcher" # Dispatcher for message replay. - replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher" + replay-dispatcher = "pekko.persistence.dispatchers.default-replay-dispatcher" # Storage location of LevelDB files. dir = "journal" # Use fsync on write. @@ -278,18 +278,18 @@ akka.persistence.journal.leveldb { # Shared LevelDB journal plugin (for testing only). # Note: this plugin requires explicit LevelDB dependency, see below. -akka.persistence.journal.leveldb-shared { +pekko.persistence.journal.leveldb-shared { # Class name of the plugin. class = "org.apache.pekko.persistence.journal.leveldb.SharedLeveldbJournal" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.actor.default-dispatcher" + plugin-dispatcher = "pekko.actor.default-dispatcher" # Timeout for async journal operations. timeout = 10s store { # Dispatcher for shared store actor. - store-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" + store-dispatcher = "pekko.persistence.dispatchers.default-plugin-dispatcher" # Dispatcher for message replay. 
- replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher" + replay-dispatcher = "pekko.persistence.dispatchers.default-replay-dispatcher" # Storage location of LevelDB files. dir = "journal" # Use fsync on write. @@ -304,11 +304,11 @@ akka.persistence.journal.leveldb-shared { } } -akka.persistence.journal.proxy { +pekko.persistence.journal.proxy { # Class name of the plugin. class = "org.apache.pekko.persistence.journal.PersistencePluginProxy" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.actor.default-dispatcher" + plugin-dispatcher = "pekko.actor.default-dispatcher" # Set this to on in the configuration of the ActorSystem # that will host the target journal start-target-journal = off @@ -320,11 +320,11 @@ akka.persistence.journal.proxy { init-timeout = 10s } -akka.persistence.snapshot-store.proxy { +pekko.persistence.snapshot-store.proxy { # Class name of the plugin. class = "org.apache.pekko.persistence.journal.PersistencePluginProxy" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.actor.default-dispatcher" + plugin-dispatcher = "pekko.actor.default-dispatcher" # Set this to on in the configuration of the ActorSystem # that will host the target snapshot-store start-target-snapshot-store = off diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/AtLeastOnceDelivery.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/AtLeastOnceDelivery.scala index 976bd602b6..0f34591e81 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/AtLeastOnceDelivery.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/AtLeastOnceDelivery.scala @@ -179,7 +179,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { * Interval between redelivery attempts. * * The default value can be configured with the - * `akka.persistence.at-least-once-delivery.redeliver-interval` + * `pekko.persistence.at-least-once-delivery.redeliver-interval` * configuration key. 
This method can be overridden by implementation classes to return * non-default values. */ @@ -195,7 +195,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { * this helps to prevent an overwhelming amount of messages to be sent at once. * * The default value can be configured with the - * `akka.persistence.at-least-once-delivery.redelivery-burst-limit` + * `pekko.persistence.at-least-once-delivery.redelivery-burst-limit` * configuration key. This method can be overridden by implementation classes to return * non-default values. */ @@ -209,7 +209,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { * will be sent to `self`. The count is reset after a restart. * * The default value can be configured with the - * `akka.persistence.at-least-once-delivery.warn-after-number-of-unconfirmed-attempts` + * `pekko.persistence.at-least-once-delivery.warn-after-number-of-unconfirmed-attempts` * configuration key. This method can be overridden by implementation classes to return * non-default values. */ @@ -224,7 +224,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { * [[AtLeastOnceDelivery.MaxUnconfirmedMessagesExceededException]]. * * The default value can be configured with the - * `akka.persistence.at-least-once-delivery.max-unconfirmed-messages` + * `pekko.persistence.at-least-once-delivery.max-unconfirmed-messages` * configuration key. This method can be overridden by implementation classes to return * non-default values. */ diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala index 4bb076f8e9..1bf033b61c 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala @@ -74,7 +74,7 @@ trait PersistenceIdentity { /** * Configuration id of the journal plugin servicing this persistent actor. 
- * When empty, looks in `akka.persistence.journal.plugin` to find configuration entry path. + * When empty, looks in `pekko.persistence.journal.plugin` to find configuration entry path. * When configured, uses `journalPluginId` as absolute path to the journal configuration entry. * Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`. */ @@ -82,7 +82,7 @@ trait PersistenceIdentity { /** * Configuration id of the snapshot plugin servicing this persistent actor. - * When empty, looks in `akka.persistence.snapshot-store.plugin` to find configuration entry path. + * When empty, looks in `pekko.persistence.snapshot-store.plugin` to find configuration entry path. * When configured, uses `snapshotPluginId` as absolute path to the snapshot store configuration entry. * Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`. */ @@ -162,10 +162,10 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { } /** Config path to fall-back to if a setting is not defined in a specific plugin's config section */ - val JournalFallbackConfigPath = "akka.persistence.journal-plugin-fallback" + val JournalFallbackConfigPath = "pekko.persistence.journal-plugin-fallback" /** Config path to fall-back to if a setting is not defined in a specific snapshot plugin's config section */ - val SnapshotStoreFallbackConfigPath = "akka.persistence.snapshot-store-plugin-fallback" + val SnapshotStoreFallbackConfigPath = "pekko.persistence.snapshot-store-plugin-fallback" /** * INTERNAL API @@ -203,9 +203,9 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { private def log: LoggingAdapter = Logging(system, classOf[Persistence]) - private val NoSnapshotStorePluginId = "akka.persistence.no-snapshot-store" + private val NoSnapshotStorePluginId = "pekko.persistence.no-snapshot-store" - private val config = 
system.settings.config.getConfig("akka.persistence") + private val config = system.settings.config.getConfig("pekko.persistence") /** * INTERNAL API: When starting many persistent actors at the same time the journal @@ -232,7 +232,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { if (isEmpty(configPath)) { log.warning( "No default snapshot store configured! " + - "To configure a default snapshot-store plugin set the `akka.persistence.snapshot-store.plugin` key. " + + "To configure a default snapshot-store plugin set the `pekko.persistence.snapshot-store.plugin` key. " + "For details see 'reference.conf'") NoSnapshotStorePluginId } else { @@ -322,7 +322,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { /** * INTERNAL API * Returns the plugin config identified by `pluginId`. - * When empty, looks in `akka.persistence.journal.plugin` to find configuration entry path. + * When empty, looks in `pekko.persistence.journal.plugin` to find configuration entry path. * When configured, uses `journalPluginId` as absolute path to the journal configuration entry. */ private[pekko] final def journalConfigFor( @@ -348,7 +348,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { /** * INTERNAL API * Returns a journal plugin actor identified by `journalPluginId`. - * When empty, looks in `akka.persistence.journal.plugin` to find configuration entry path. + * When empty, looks in `pekko.persistence.journal.plugin` to find configuration entry path. * When configured, uses `journalPluginId` as absolute path to the journal configuration entry. * Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`. */ @@ -365,7 +365,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { * INTERNAL API * * Returns a snapshot store plugin actor identified by `snapshotPluginId`. 
- * When empty, looks in `akka.persistence.snapshot-store.plugin` to find configuration entry path. + * When empty, looks in `pekko.persistence.snapshot-store.plugin` to find configuration entry path. * When configured, uses `snapshotPluginId` as absolute path to the snapshot store configuration entry. * Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`. */ diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala index 37ef8aae29..e478c1cc4a 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala @@ -36,10 +36,10 @@ private[pekko] object SnapshotAfter extends ExtensionId[SnapshotAfter] with Exte /** * SnapshotAfter enables PersistentFSM to take periodical snapshot. - * See `akka.persistence.fsm.snapshot-after` for configuration options. + * See `pekko.persistence.fsm.snapshot-after` for configuration options. 
*/ private[pekko] class SnapshotAfter(config: Config) extends Extension { - val key = "akka.persistence.fsm.snapshot-after" + val key = "pekko.persistence.fsm.snapshot-after" val snapshotAfterValue = config.getString(key).toLowerCase match { case "off" => None case _ => Some(config.getInt(key)) diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/PersistencePluginProxy.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/PersistencePluginProxy.scala index 4ce1386f59..4956adbe88 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/PersistencePluginProxy.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/PersistencePluginProxy.scala @@ -32,13 +32,13 @@ object PersistencePluginProxy { def setTargetLocation(system: ActorSystem, address: Address): Unit = { Persistence(system).journalFor(null) ! TargetLocation(address) - if (system.settings.config.getString("akka.persistence.snapshot-store.plugin") != "") + if (system.settings.config.getString("pekko.persistence.snapshot-store.plugin") != "") Persistence(system).snapshotStoreFor(null) ! 
TargetLocation(address) } def start(system: ActorSystem): Unit = { Persistence(system).journalFor(null) - if (system.settings.config.getString("akka.persistence.snapshot-store.plugin") != "") + if (system.settings.config.getString("pekko.persistence.snapshot-store.plugin") != "") Persistence(system).snapshotStoreFor(null) } @@ -80,8 +80,8 @@ final class PersistencePluginProxy(config: Config) extends Actor with Stash with private val pluginId = self.path.name private val pluginType: PluginType = pluginId match { - case "akka.persistence.journal.proxy" => Journal - case "akka.persistence.snapshot-store.proxy" => SnapshotStore + case "pekko.persistence.journal.proxy" => Journal + case "pekko.persistence.snapshot-store.proxy" => SnapshotStore case other => throw new IllegalArgumentException("Unknown plugin type: " + other) } diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournal.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournal.scala index 1f131ebdbf..af3709b911 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournal.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/LeveldbJournal.scala @@ -33,7 +33,7 @@ private[persistence] class LeveldbJournal(cfg: Config) extends AsyncWriteJournal override def prepareConfig: Config = if (cfg ne LeveldbStore.emptyConfig) cfg - else context.system.settings.config.getConfig("akka.persistence.journal.leveldb") + else context.system.settings.config.getConfig("pekko.persistence.journal.leveldb") override def receivePluginInternal: Receive = receiveCompactionInternal.orElse { case ReplayTaggedMessages(fromSequenceNr, toSequenceNr, max, tag, replyTo) => @@ -132,7 +132,7 @@ private[persistence] object LeveldbJournal { */ private[persistence] class SharedLeveldbJournal extends AsyncWriteProxy { val timeout: Timeout = - 
context.system.settings.config.getMillisDuration("akka.persistence.journal.leveldb-shared.timeout") + context.system.settings.config.getMillisDuration("pekko.persistence.journal.leveldb-shared.timeout") override def receivePluginInternal: Receive = { case cmd: LeveldbJournal.SubscriptionCommand => @@ -170,7 +170,7 @@ object SharedLeveldbJournal { */ def configToEnableJavaSerializationForTest: Config = { ConfigFactory.parseString(s""" - akka.actor.serialization-bindings { + pekko.actor.serialization-bindings { "org.apache.pekko.persistence.journal.AsyncWriteTarget$$WriteMessages" = java-test "org.apache.pekko.persistence.journal.AsyncWriteTarget$$DeleteMessagesTo" = java-test "org.apache.pekko.persistence.journal.AsyncWriteTarget$$ReplayMessages" = java-test diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbStore.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbStore.scala index 79e5c272e2..bc0f006263 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbStore.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/journal/leveldb/SharedLeveldbStore.scala @@ -31,7 +31,7 @@ class SharedLeveldbStore(cfg: Config) extends LeveldbStore { override def prepareConfig: Config = if (cfg ne LeveldbStore.emptyConfig) cfg.getConfig("store") - else context.system.settings.config.getConfig("akka.persistence.journal.leveldb-shared.store") + else context.system.settings.config.getConfig("pekko.persistence.journal.leveldb-shared.store") def receive = receiveCompactionInternal.orElse { case WriteMessages(messages) => diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/serialization/MessageSerializer.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/serialization/MessageSerializer.scala index 5d3709873e..f29933f6af 100644 --- 
a/akka-persistence/src/main/scala/org/apache/pekko/persistence/serialization/MessageSerializer.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/serialization/MessageSerializer.scala @@ -23,7 +23,7 @@ import pekko.serialization._ import pekko.util.ccompat._ /** - * Marker trait for all protobuf-serializable messages in `akka.persistence`. + * Marker trait for all protobuf-serializable messages in `pekko.persistence`. */ trait Message extends Serializable diff --git a/akka-persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreRegistry.scala b/akka-persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreRegistry.scala index 7c00f665b0..5b2314e9f2 100644 --- a/akka-persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreRegistry.scala +++ b/akka-persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreRegistry.scala @@ -52,7 +52,7 @@ class DurableStateStoreRegistry(system: ExtendedActorSystem) private val systemConfig = system.settings.config private lazy val defaultPluginId = { - val configPath = systemConfig.getString("akka.persistence.state.plugin") + val configPath = systemConfig.getString("pekko.persistence.state.plugin") Persistence.verifyPluginConfigIsDefined(configPath, "Default DurableStateStore") Persistence.verifyPluginConfigExists(systemConfig, configPath, "DurableStateStore") configPath @@ -66,7 +66,7 @@ class DurableStateStoreRegistry(system: ExtendedActorSystem) private def pluginConfig(pluginId: String): Config = { val configPath = pluginIdOrDefault(pluginId) - systemConfig.getConfig(configPath).withFallback(systemConfig.getConfig("akka.persistence.state-plugin-fallback")) + systemConfig.getConfig(configPath).withFallback(systemConfig.getConfig("pekko.persistence.state-plugin-fallback")) } /** Check for default or missing identity. 
*/ diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/AtLeastOnceDeliveryFailureSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/AtLeastOnceDeliveryFailureSpec.scala index 99770ed17d..f35395a495 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/AtLeastOnceDeliveryFailureSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/AtLeastOnceDeliveryFailureSpec.scala @@ -17,18 +17,18 @@ import pekko.testkit._ object AtLeastOnceDeliveryFailureSpec { val config = ConfigFactory.parseString(s""" - akka.persistence.sender.chaos.live-processing-failure-rate = 0.3 - akka.persistence.sender.chaos.replay-processing-failure-rate = 0.1 - akka.persistence.destination.chaos.confirm-failure-rate = 0.3 - akka.persistence.journal.plugin = "akka.persistence.journal.chaos" - akka.persistence.journal.chaos.write-failure-rate = 0.3 - akka.persistence.journal.chaos.confirm-failure-rate = 0.2 - akka.persistence.journal.chaos.delete-failure-rate = 0.3 - akka.persistence.journal.chaos.replay-failure-rate = 0.25 - akka.persistence.journal.chaos.read-highest-failure-rate = 0.1 - akka.persistence.journal.chaos.class = org.apache.pekko.persistence.journal.chaos.ChaosJournal - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/snapshots-at-least-once-delivery-failure-spec/" + pekko.persistence.sender.chaos.live-processing-failure-rate = 0.3 + pekko.persistence.sender.chaos.replay-processing-failure-rate = 0.1 + pekko.persistence.destination.chaos.confirm-failure-rate = 0.3 + pekko.persistence.journal.plugin = "pekko.persistence.journal.chaos" + pekko.persistence.journal.chaos.write-failure-rate = 0.3 + pekko.persistence.journal.chaos.confirm-failure-rate = 0.2 + pekko.persistence.journal.chaos.delete-failure-rate = 0.3 + pekko.persistence.journal.chaos.replay-failure-rate = 0.25 + 
pekko.persistence.journal.chaos.read-highest-failure-rate = 0.1 + pekko.persistence.journal.chaos.class = org.apache.pekko.persistence.journal.chaos.ChaosJournal + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/snapshots-at-least-once-delivery-failure-spec/" """) val numMessages = 10 @@ -69,7 +69,7 @@ object AtLeastOnceDeliveryFailureSpec { with ChaosSupport with ActorLogging with AtLeastOnceDelivery { - val config = context.system.settings.config.getConfig("akka.persistence.sender.chaos") + val config = context.system.settings.config.getConfig("pekko.persistence.sender.chaos") val liveProcessingFailureRate = config.getDouble("live-processing-failure-rate") val replayProcessingFailureRate = config.getDouble("replay-processing-failure-rate") @@ -128,7 +128,7 @@ object AtLeastOnceDeliveryFailureSpec { } class ChaosDestination(val probe: ActorRef) extends Actor with ChaosSupport with ActorLogging { - val config = context.system.settings.config.getConfig("akka.persistence.destination.chaos") + val config = context.system.settings.config.getConfig("pekko.persistence.destination.chaos") val confirmFailureRate = config.getDouble("confirm-failure-rate") def receive = { diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EndToEndEventAdapterSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EndToEndEventAdapterSpec.scala index e447df3d63..9b0f75cc57 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EndToEndEventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EndToEndEventAdapterSpec.scala @@ -118,7 +118,7 @@ class EndToEndEventAdapterSpec extends AnyWordSpecLike with Matchers with Before val journalName = "leveldb" val journalConfig = PersistenceSpec.config("leveldb", "LeveldbEndToEndEventAdapterSpec") - val storageLocations = List("akka.persistence.journal.leveldb.dir").map(s => 
new File(journalConfig.getString(s))) + val storageLocations = List("pekko.persistence.journal.leveldb.dir").map(s => new File(journalConfig.getString(s))) override protected def beforeAll(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) @@ -131,7 +131,7 @@ class EndToEndEventAdapterSpec extends AnyWordSpecLike with Matchers with Before val noAdaptersConfig = ConfigFactory.parseString("") val adaptersConfig = ConfigFactory.parseString(s""" - |akka.persistence.journal { + |pekko.persistence.journal { | $journalName { | event-adapters { | a = "${classOf[EndToEndEventAdapterSpec].getCanonicalName}$$AEndToEndAdapter" @@ -147,11 +147,11 @@ class EndToEndEventAdapterSpec extends AnyWordSpecLike with Matchers with Before | } | } |} - |akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + |pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """.stripMargin) val newAdaptersConfig = ConfigFactory.parseString(s""" - |akka.persistence.journal { + |pekko.persistence.journal { | $journalName { | event-adapters { | a = "${classOf[EndToEndEventAdapterSpec].getCanonicalName}$$NewAEndToEndAdapter" @@ -170,7 +170,7 @@ class EndToEndEventAdapterSpec extends AnyWordSpecLike with Matchers with Before """.stripMargin) def persister(name: String, probe: Option[ActorRef] = None)(implicit system: ActorSystem) = - system.actorOf(Props(classOf[EndToEndAdapterActor], name, "akka.persistence.journal." + journalName, probe)) + system.actorOf(Props(classOf[EndToEndAdapterActor], name, "pekko.persistence.journal." + journalName, probe)) def withActorSystem[T](name: String, config: Config)(block: ActorSystem => T): T = { val system = ActorSystem(name, journalConfig.withFallback(config)) @@ -242,7 +242,7 @@ class EndToEndEventAdapterSpec extends AnyWordSpecLike with Matchers with Before "give nice error message when unable to play back as adapter does not exist" in { // after some time, we start the system a-new... 
// and the adapter originally used for adapting A is missing from the configuration! - val journalPath = s"akka.persistence.journal.$journalName" + val journalPath = s"pekko.persistence.journal.$journalName" val missingAdapterConfig = adaptersConfig .withoutPath(s"$journalPath.event-adapters.a") .withoutPath( @@ -250,7 +250,7 @@ class EndToEndEventAdapterSpec extends AnyWordSpecLike with Matchers with Before withActorSystem("MissingAdapterSystem", journalConfig.withFallback(missingAdapterConfig)) { implicit system2 => intercept[IllegalArgumentException] { - Persistence(system2).adaptersFor(s"akka.persistence.journal.$journalName").get(classOf[String]) + Persistence(system2).adaptersFor(s"pekko.persistence.journal.$journalName").get(classOf[String]) }.getMessage should include("was bound to undefined event-adapter: a (bindings: [a, b], known adapters: b)") } } diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventAdapterSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventAdapterSpec.scala index c811c33e6a..2c803cb1fd 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventAdapterSpec.scala @@ -110,7 +110,7 @@ class EventAdapterSpec(journalName: String, journalConfig: Config, adapterConfig "inmem", PersistenceSpec.config("inmem", "InmemPersistentTaggingSpec"), ConfigFactory.parseString(s""" - |akka.persistence.journal { + |pekko.persistence.journal { | | common-event-adapters { | age = "${classOf[EventAdapterSpec].getCanonicalName}$$UserAgeTaggingAdapter" @@ -118,7 +118,7 @@ class EventAdapterSpec(journalName: String, journalConfig: Config, adapterConfig | } | | inmem { - | event-adapters = $${akka.persistence.journal.common-event-adapters} + | event-adapters = $${pekko.persistence.journal.common-event-adapters} | event-adapter-bindings { | "${EventAdapterSpec.DomainEventClassName}" = age | 
"${EventAdapterSpec.JournalModelClassName}" = age @@ -126,7 +126,7 @@ class EventAdapterSpec(journalName: String, journalConfig: Config, adapterConfig | } | | with-actor-system { - | class = $${akka.persistence.journal.inmem.class} + | class = $${pekko.persistence.journal.inmem.class} | dir = "journal-1" | | event-adapters { @@ -138,10 +138,10 @@ class EventAdapterSpec(journalName: String, journalConfig: Config, adapterConfig | } | | replay-pass-through-adapter-journal { - | class = $${akka.persistence.journal.inmem.class} + | class = $${pekko.persistence.journal.inmem.class} | dir = "journal-2" | - | event-adapters = $${akka.persistence.journal.common-event-adapters} + | event-adapters = $${pekko.persistence.journal.common-event-adapters} | event-adapter-bindings { | "${EventAdapterSpec.JournalModelClassName}" = replay-pass-through | "${EventAdapterSpec.DomainEventClassName}" = replay-pass-through @@ -149,20 +149,20 @@ class EventAdapterSpec(journalName: String, journalConfig: Config, adapterConfig | } | | no-adapter { - | class = $${akka.persistence.journal.inmem.class} + | class = $${pekko.persistence.journal.inmem.class} | dir = "journal-3" | } |} """.stripMargin)) def persister(name: String, journalId: String = journalName) = - system.actorOf(Props(classOf[PersistAllIncomingActor], name, "akka.persistence.journal." + journalId)) + system.actorOf(Props(classOf[PersistAllIncomingActor], name, "pekko.persistence.journal." + journalId)) def toJournal(in: Any, journalId: String = journalName) = - Persistence(system).adaptersFor("akka.persistence.journal." + journalId).get(in.getClass).toJournal(in) + Persistence(system).adaptersFor("pekko.persistence.journal." + journalId).get(in.getClass).toJournal(in) def fromJournal(in: Any, journalId: String = journalName) = - Persistence(system).adaptersFor("akka.persistence.journal." + journalId).get(in.getClass).fromJournal(in, "") + Persistence(system).adaptersFor("pekko.persistence.journal." 
+ journalId).get(in.getClass).fromJournal(in, "") "EventAdapter" must { diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorDeleteFailureSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorDeleteFailureSpec.scala index 64fa10af4d..4c1c569c15 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorDeleteFailureSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorDeleteFailureSpec.scala @@ -52,7 +52,7 @@ class EventSourcedActorDeleteFailureSpec "SnapshotFailureRobustnessSpec", extraConfig = Some( """ - akka.persistence.journal.inmem.class = "org.apache.pekko.persistence.EventSourcedActorDeleteFailureSpec$DeleteFailingInmemJournal" + pekko.persistence.journal.inmem.class = "org.apache.pekko.persistence.EventSourcedActorDeleteFailureSpec$DeleteFailingInmemJournal" """))) with ImplicitSender { import EventSourcedActorDeleteFailureSpec._ diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorFailureSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorFailureSpec.scala index 4327422fe6..d2f26696c6 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorFailureSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/EventSourcedActorFailureSpec.scala @@ -150,7 +150,7 @@ class EventSourcedActorFailureSpec "SnapshotFailureRobustnessSpec", extraConfig = Some( """ - akka.persistence.journal.inmem.class = "org.apache.pekko.persistence.EventSourcedActorFailureSpec$FailingInmemJournal" + pekko.persistence.journal.inmem.class = "org.apache.pekko.persistence.EventSourcedActorFailureSpec$FailingInmemJournal" """))) with ImplicitSender { diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/LoadPluginSpec.scala 
b/akka-persistence/src/test/scala/org/apache/pekko/persistence/LoadPluginSpec.scala index 77ce137d4e..b36f97a06b 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/LoadPluginSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/LoadPluginSpec.scala @@ -36,8 +36,8 @@ class LoadPluginSpec "inmem", "LoadJournalSpec", extraConfig = Some(""" - akka.persistence.journal.inmem.class = "org.apache.pekko.persistence.LoadPluginSpec$JournalWithConfig" - akka.persistence.journal.inmem.extra-property = 17 + pekko.persistence.journal.inmem.class = "org.apache.pekko.persistence.LoadPluginSpec$JournalWithConfig" + pekko.persistence.journal.inmem.extra-property = 17 test-plugin { class = "org.apache.pekko.persistence.LoadPluginSpec$JournalWithStartupNotification" @@ -48,7 +48,7 @@ class LoadPluginSpec "A journal" must { "be created with plugin config" in { - val journalRef = Persistence(system).journalFor("akka.persistence.journal.inmem") + val journalRef = Persistence(system).journalFor("pekko.persistence.journal.inmem") journalRef ! 
GetConfig expectMsgType[Config].getInt("extra-property") should be(17) } diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/ManyRecoveriesSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/ManyRecoveriesSpec.scala index 71292a70d4..bdb8331869 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/ManyRecoveriesSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/ManyRecoveriesSpec.scala @@ -42,16 +42,16 @@ object ManyRecoveriesSpec { } class ManyRecoveriesSpec extends PersistenceSpec(ConfigFactory.parseString(s""" - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { type = Dispatcher executor = "thread-pool-executor" thread-pool-executor { fixed-pool-size = 5 } } - akka.persistence.max-concurrent-recoveries = 3 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.actor.warn-about-java-serializer-usage = off + pekko.persistence.max-concurrent-recoveries = 3 + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.actor.warn-about-java-serializer-usage = off """)) with ImplicitSender { import ManyRecoveriesSpec._ diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/OptionalSnapshotStoreSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/OptionalSnapshotStoreSpec.scala index 64096030a4..50a199d6ad 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/OptionalSnapshotStoreSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/OptionalSnapshotStoreSpec.scala @@ -29,18 +29,18 @@ object OptionalSnapshotStoreSpec { } class PickedSnapshotStorePersistentActor(name: String) extends AnyPersistentActor(name) { - override def snapshotPluginId: String = "akka.persistence.snapshot-store.local" + override def snapshotPluginId: String = "pekko.persistence.snapshot-store.local" } } class OptionalSnapshotStoreSpec extends 
PersistenceSpec(ConfigFactory.parseString(s""" - akka.persistence.publish-plugin-commands = on - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.persistence.publish-plugin-commands = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.warn-about-java-serializer-usage = off # snapshot store plugin is NOT defined, things should still work - akka.persistence.snapshot-store.local.dir = "target/snapshots-${classOf[OptionalSnapshotStoreSpec].getName}/" + pekko.persistence.snapshot-store.local.dir = "target/snapshots-${classOf[OptionalSnapshotStoreSpec].getName}/" """)) with ImplicitSender { import OptionalSnapshotStoreSpec._ diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PerformanceSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PerformanceSpec.scala index 4ccac749f2..496db669e3 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PerformanceSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PerformanceSpec.scala @@ -15,9 +15,9 @@ import pekko.testkit._ object PerformanceSpec { val config = """ - akka.persistence.performance.cycles.load = 100 + pekko.persistence.performance.cycles.load = 100 # more accurate throughput measurements - #akka.persistence.performance.cycles.load = 200000 + #pekko.persistence.performance.cycles.load = 200000 """ case object StopMeasure @@ -122,7 +122,7 @@ class PerformanceSpec with ImplicitSender { import PerformanceSpec._ - val loadCycles = system.settings.config.getInt("akka.persistence.performance.cycles.load") + val loadCycles = system.settings.config.getInt("pekko.persistence.performance.cycles.load") def stressPersistentActor(persistentActor: ActorRef, failAt: Option[Long], description: String): Unit = { failAt.foreach { persistentActor ! 
FailAt(_) } diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistenceSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistenceSpec.scala index 1e98f7f523..b49f5e7b6b 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistenceSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistenceSpec.scala @@ -63,24 +63,24 @@ object PersistenceSpec { .map(ConfigFactory.parseString(_)) .getOrElse(ConfigFactory.empty()) .withFallback(ConfigFactory.parseString(s""" - akka.actor.serialize-creators = $serialization - akka.actor.serialize-messages = $serialization - akka.actor.no-serialization-verification-needed-class-prefix = [] + pekko.actor.serialize-creators = $serialization + pekko.actor.serialize-messages = $serialization + pekko.actor.no-serialization-verification-needed-class-prefix = [] # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off - akka.persistence.publish-plugin-commands = on - akka.persistence.journal.plugin = "akka.persistence.journal.${plugin}" - akka.persistence.journal.leveldb.dir = "target/journal-${test}" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/snapshots-${test}/" - akka.test.single-expect-default = 10s + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off + pekko.persistence.publish-plugin-commands = on + pekko.persistence.journal.plugin = "pekko.persistence.journal.${plugin}" + pekko.persistence.journal.leveldb.dir = "target/journal-${test}" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/snapshots-${test}/" + pekko.test.single-expect-default = 10s """)) } trait Cleanup { this: AkkaSpec => val storageLocations = - 
List("akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + List("pekko.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) override protected def atStartup(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorBoundedStashingSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorBoundedStashingSpec.scala index 9a86c85ed0..e8ba62ed23 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorBoundedStashingSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorBoundedStashingSpec.scala @@ -47,9 +47,9 @@ object PersistentActorBoundedStashingSpec { val templateConfig = s""" - |akka.actor.default-mailbox.stash-capacity=$capacity - |akka.actor.guardian-supervisor-strategy="org.apache.pekko.actor.StoppingSupervisorStrategy" - |akka.persistence.internal-stash-overflow-strategy = "%s" + |pekko.actor.default-mailbox.stash-capacity=$capacity + |pekko.actor.guardian-supervisor-strategy="org.apache.pekko.actor.StoppingSupervisorStrategy" + |pekko.persistence.internal-stash-overflow-strategy = "%s" |""".stripMargin val throwConfig = String.format(templateConfig, "org.apache.pekko.persistence.ThrowExceptionConfigurator") diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorJournalProtocolSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorJournalProtocolSpec.scala index 89d69703a0..0bb737968e 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorJournalProtocolSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorJournalProtocolSpec.scala @@ -20,8 +20,8 @@ puppet { class = "org.apache.pekko.persistence.JournalPuppet" max-message-batch-size = 10 } 
-akka.persistence.journal.plugin = puppet -akka.persistence.snapshot-store.plugin = "akka.persistence.no-snapshot-store" +pekko.persistence.journal.plugin = puppet +pekko.persistence.snapshot-store.plugin = "pekko.persistence.no-snapshot-store" """) sealed trait Command diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorRecoveryTimeoutSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorRecoveryTimeoutSpec.scala index 078814f9cf..715263ccbd 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorRecoveryTimeoutSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorRecoveryTimeoutSpec.scala @@ -21,7 +21,7 @@ object PersistentActorRecoveryTimeoutSpec { SteppingInmemJournal .config(PersistentActorRecoveryTimeoutSpec.journalId) .withFallback(ConfigFactory.parseString(""" - |akka.persistence.journal.stepping-inmem.recovery-event-timeout=1s + |pekko.persistence.journal.stepping-inmem.recovery-event-timeout=1s """.stripMargin)) .withFallback(PersistenceSpec.config("stepping-inmem", "PersistentActorRecoveryTimeoutSpec")) diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorSpec.scala index 11e6f191f5..80e364e0dc 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/PersistentActorSpec.scala @@ -1667,10 +1667,10 @@ class InmemPersistentActorWithRuntimePluginConfigSpec """.stripMargin) .withValue( s"custom.persistence.journal.inmem", - system.settings.config.getValue(s"akka.persistence.journal.inmem")) + system.settings.config.getValue(s"pekko.persistence.journal.inmem")) .withValue( "custom.persistence.snapshot-store.local", - 
system.settings.config.getValue("akka.persistence.snapshot-store.local")) + system.settings.config.getValue("pekko.persistence.snapshot-store.local")) } override protected def behavior1PersistentActor: ActorRef = diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/RecoveryPermitterSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/RecoveryPermitterSpec.scala index 42ff1b2249..36dd1c6a40 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/RecoveryPermitterSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/RecoveryPermitterSpec.scala @@ -46,9 +46,9 @@ object RecoveryPermitterSpec { } class RecoveryPermitterSpec extends PersistenceSpec(ConfigFactory.parseString(s""" - akka.persistence.max-concurrent-recoveries = 3 - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.actor.warn-about-java-serializer-usage = off + pekko.persistence.max-concurrent-recoveries = 3 + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.actor.warn-about-java-serializer-usage = off """)) with ImplicitSender { import RecoveryPermitter._ import RecoveryPermitterSpec._ diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotDirectoryFailureSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotDirectoryFailureSpec.scala index 9e4f04d747..c4561f0892 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotDirectoryFailureSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotDirectoryFailureSpec.scala @@ -35,7 +35,7 @@ class SnapshotDirectoryFailureSpec "inmem", "SnapshotDirectoryFailureSpec", extraConfig = Some(s""" - akka.persistence.snapshot-store.local.dir = "${SnapshotDirectoryFailureSpec.inUseSnapshotPath}" + pekko.persistence.snapshot-store.local.dir = "${SnapshotDirectoryFailureSpec.inUseSnapshotPath}" """))) with ImplicitSender { diff --git 
a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotFailureRobustnessSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotFailureRobustnessSpec.scala index c7d285a05e..e191d7740a 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotFailureRobustnessSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotFailureRobustnessSpec.scala @@ -42,7 +42,7 @@ object SnapshotFailureRobustnessSpec { // TODO do we call it "snapshot store" or "snapshot plugin", small inconsistency here override def snapshotPluginId: String = - "akka.persistence.snapshot-store.local-delete-fail" + "pekko.persistence.snapshot-store.local-delete-fail" override def receiveRecover: Receive = { case SnapshotOffer(md, s) => probe ! ((md, s)) @@ -105,9 +105,9 @@ class SnapshotFailureRobustnessSpec "SnapshotFailureRobustnessSpec", serialization = "off", extraConfig = Some(s""" - akka.persistence.snapshot-store.local.class = "org.apache.pekko.persistence.SnapshotFailureRobustnessSpec$$FailingLocalSnapshotStore" - akka.persistence.snapshot-store.local-delete-fail = $${akka.persistence.snapshot-store.local} - akka.persistence.snapshot-store.local-delete-fail.class = "org.apache.pekko.persistence.SnapshotFailureRobustnessSpec$$DeleteFailingLocalSnapshotStore" + pekko.persistence.snapshot-store.local.class = "org.apache.pekko.persistence.SnapshotFailureRobustnessSpec$$FailingLocalSnapshotStore" + pekko.persistence.snapshot-store.local-delete-fail = $${pekko.persistence.snapshot-store.local} + pekko.persistence.snapshot-store.local-delete-fail.class = "org.apache.pekko.persistence.SnapshotFailureRobustnessSpec$$DeleteFailingLocalSnapshotStore" """))) with ImplicitSender { @@ -215,10 +215,10 @@ class SnapshotIsOptionalSpec "SnapshotFailureReplayEventsSpec", serialization = "off", extraConfig = Some(s""" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - 
akka.persistence.snapshot-store.local.class = "org.apache.pekko.persistence.SnapshotFailureRobustnessSpec$$FailingLocalSnapshotStore" - akka.persistence.snapshot-store.local.dir = "target/persistence-${UUID.randomUUID().toString}" - akka.persistence.snapshot-store.local.snapshot-is-optional = true + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.class = "org.apache.pekko.persistence.SnapshotFailureRobustnessSpec$$FailingLocalSnapshotStore" + pekko.persistence.snapshot-store.local.dir = "target/persistence-${UUID.randomUUID().toString}" + pekko.persistence.snapshot-store.local.snapshot-is-optional = true """))) with ImplicitSender { diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotRecoveryWithEmptyJournalSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotRecoveryWithEmptyJournalSpec.scala index 7cb8125adf..2a9c76a70c 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotRecoveryWithEmptyJournalSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotRecoveryWithEmptyJournalSpec.scala @@ -67,7 +67,7 @@ class SnapshotRecoveryWithEmptyJournalSpec "inmem", "SnapshotRecoveryWithEmptyJournalSpec", extraConfig = Some(s""" - akka.persistence.snapshot-store.local.dir = "${SnapshotRecoveryWithEmptyJournalSpec.survivingSnapshotPath}" + pekko.persistence.snapshot-store.local.dir = "${SnapshotRecoveryWithEmptyJournalSpec.survivingSnapshotPath}" """))) with ImplicitSender { diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotSerializationSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotSerializationSpec.scala index fb4e17e983..3d779ee927 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotSerializationSpec.scala +++ 
b/akka-persistence/src/test/scala/org/apache/pekko/persistence/SnapshotSerializationSpec.scala @@ -73,7 +73,7 @@ class SnapshotSerializationSpec "SnapshotSerializationSpec", serialization = "off", extraConfig = Some(""" - akka.actor { + pekko.actor { serializers { my-snapshot = "org.apache.pekko.persistence.SnapshotSerializationSpec$MySerializer" } diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/TimerPersistentActorSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/TimerPersistentActorSpec.scala index 07b6330dcd..4dc4ba4ea9 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/TimerPersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/TimerPersistentActorSpec.scala @@ -82,8 +82,8 @@ object TimerPersistentActorSpec { } class TimerPersistentActorSpec extends PersistenceSpec(ConfigFactory.parseString(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.actor.warn-about-java-serializer-usage = off + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.actor.warn-about-java-serializer-usage = off """)) with ImplicitSender { import TimerPersistentActorSpec._ diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/fsm/PersistentFSMSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/fsm/PersistentFSMSpec.scala index 4c99a91d70..5a4d8403fb 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/fsm/PersistentFSMSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/fsm/PersistentFSMSpec.scala @@ -364,12 +364,12 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) } - "save periodical snapshots if akka.persistence.fsm.enable-snapshot-after = on" in { + "save periodical snapshots if pekko.persistence.fsm.enable-snapshot-after = on" in { val sys2 = ActorSystem( "PersistentFsmSpec2", 
ConfigFactory.parseString(""" - akka.persistence.fsm.enable-snapshot-after = on - akka.persistence.fsm.snapshot-after = 3 + pekko.persistence.fsm.enable-snapshot-after = on + pekko.persistence.fsm.snapshot-after = 3 """).withFallback(PersistenceSpec.config("inmem", "PersistentFSMSpec2"))) try { @@ -391,7 +391,7 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) } finally { val storageLocations = - List("akka.persistence.snapshot-store.local.dir").map(s => new File(sys2.settings.config.getString(s))) + List("pekko.persistence.snapshot-store.local.dir").map(s => new File(sys2.settings.config.getString(s))) shutdown(sys2) storageLocations.foreach(FileUtils.deleteDirectory) } diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/InmemEventAdaptersSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/InmemEventAdaptersSpec.scala index 4afaf64be8..15c72189eb 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/InmemEventAdaptersSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/InmemEventAdaptersSpec.scala @@ -15,8 +15,8 @@ import scala.annotation.nowarn class InmemEventAdaptersSpec extends AkkaSpec { val config = ConfigFactory.parseString(s""" - |akka.persistence.journal { - | plugin = "akka.persistence.journal.inmem" + |pekko.persistence.journal { + | plugin = "pekko.persistence.journal.inmem" | | | # adapters defined for all plugins @@ -47,7 +47,7 @@ class InmemEventAdaptersSpec extends AkkaSpec { """.stripMargin).withFallback(ConfigFactory.load()) val extendedActorSystem = system.asInstanceOf[ExtendedActorSystem] - val inmemConfig = config.getConfig("akka.persistence.journal.inmem") + val inmemConfig = config.getConfig("pekko.persistence.journal.inmem") "EventAdapters" must { "parse configuration and resolve adapter definitions" in { @@ -74,14 +74,14 @@ class InmemEventAdaptersSpec extends AkkaSpec { "fail with 
useful message when binding to not defined adapter" in { val badConfig = ConfigFactory.parseString(""" - |akka.persistence.journal.inmem { + |pekko.persistence.journal.inmem { | event-adapter-bindings { | "java.lang.Integer" = undefined-adapter | } |} """.stripMargin) - val combinedConfig = badConfig.getConfig("akka.persistence.journal.inmem") + val combinedConfig = badConfig.getConfig("pekko.persistence.journal.inmem") val ex = intercept[IllegalArgumentException] { EventAdapters(extendedActorSystem, combinedConfig) } diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/SteppingInmemJournal.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/SteppingInmemJournal.scala index eeb4519c29..e28467308e 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/SteppingInmemJournal.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/SteppingInmemJournal.scala @@ -35,9 +35,9 @@ object SteppingInmemJournal { def config(instanceId: String): Config = ConfigFactory.parseString(s""" - |akka.persistence.journal.stepping-inmem.class=${classOf[SteppingInmemJournal].getName} - |akka.persistence.journal.plugin = "akka.persistence.journal.stepping-inmem" - |akka.persistence.journal.stepping-inmem.instance-id = "$instanceId" + |pekko.persistence.journal.stepping-inmem.class=${classOf[SteppingInmemJournal].getName} + |pekko.persistence.journal.plugin = "pekko.persistence.journal.stepping-inmem" + |pekko.persistence.journal.stepping-inmem.instance-id = "$instanceId" """.stripMargin) // keep it in a thread safe:d global so that tests can get their @@ -68,7 +68,7 @@ final class SteppingInmemJournal extends InmemJournal { import SteppingInmemJournal._ import context.dispatcher - val instanceId = context.system.settings.config.getString("akka.persistence.journal.stepping-inmem.instance-id") + val instanceId = 
context.system.settings.config.getString("pekko.persistence.journal.stepping-inmem.instance-id") var queuedOps: Seq[() => Future[Unit]] = Seq.empty var queuedTokenRecipients = List.empty[ActorRef] diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/chaos/ChaosJournal.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/chaos/ChaosJournal.scala index ef713446ee..0a7999a592 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/chaos/ChaosJournal.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/chaos/ChaosJournal.scala @@ -33,7 +33,7 @@ private object ChaosJournalMessages extends InmemMessages class ChaosJournal extends AsyncWriteJournal { import ChaosJournalMessages.{ delete => del, _ } - val config = context.system.settings.config.getConfig("akka.persistence.journal.chaos") + val config = context.system.settings.config.getConfig("pekko.persistence.journal.chaos") val writeFailureRate = config.getDouble("write-failure-rate") val deleteFailureRate = config.getDouble("delete-failure-rate") val replayFailureRate = config.getDouble("replay-failure-rate") diff --git a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/leveldb/JournalCompactionSpec.scala b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/leveldb/JournalCompactionSpec.scala index 1c0929ed98..27261a7781 100644 --- a/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/leveldb/JournalCompactionSpec.scala +++ b/akka-persistence/src/test/scala/org/apache/pekko/persistence/journal/leveldb/JournalCompactionSpec.scala @@ -135,7 +135,7 @@ abstract class JournalCompactionSpecBase(val builder: SpecComponentBuilder) exte def calculateJournalSize(): Long = FileUtils.sizeOfDirectory(journalDir) def journalDir: File = { - val relativePath = system.settings.config.getString("akka.persistence.journal.leveldb.dir") + val relativePath = 
system.settings.config.getString("pekko.persistence.journal.leveldb.dir") new File(relativePath).getAbsoluteFile } @@ -150,7 +150,7 @@ object JournalCompactionSpec { "leveldb", specId, extraConfig = Some(s""" - | akka.persistence.journal.leveldb.compaction-intervals.$specId = $compactionInterval + | pekko.persistence.journal.leveldb.compaction-intervals.$specId = $compactionInterval """.stripMargin)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/AttemptSysMsgRedeliverySpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/AttemptSysMsgRedeliverySpec.scala index 3be84f27d0..5294db80e3 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/AttemptSysMsgRedeliverySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/AttemptSysMsgRedeliverySpec.scala @@ -26,8 +26,8 @@ class AttemptSysMsgRedeliveryMultiJvmSpec(artery: Boolean) extends MultiNodeConf val third = role("third") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)).withFallback(RemotingMultiNodeSpec.commonConfig)) testTransport(on = true) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/LookupRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/LookupRemoteActorSpec.scala index 13b90a2fe9..7bf564e54d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/LookupRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/LookupRemoteActorSpec.scala @@ -19,7 +19,7 @@ import pekko.testkit._ class LookupRemoteActorMultiJvmSpec(artery: Boolean) extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.artery.enabled = 
$artery + pekko.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) val leader = role("leader") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/NewRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/NewRemoteActorSpec.scala index fbe86548eb..b6aca324c3 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/NewRemoteActorSpec.scala @@ -20,9 +20,9 @@ import pekko.util.unused class NewRemoteActorMultiJvmSpec(artery: Boolean) extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.log-remote-lifecycle-events = off - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """).withFallback(RemotingMultiNodeSpec.commonConfig))) val leader = role("leader") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/PiercingShouldKeepQuarantineSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/PiercingShouldKeepQuarantineSpec.scala index 3bcfabef71..3d7a524a5f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/PiercingShouldKeepQuarantineSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/PiercingShouldKeepQuarantineSpec.scala @@ -18,8 +18,8 @@ class PiercingShouldKeepQuarantineConfig(artery: Boolean) extends MultiNodeConfi val second = role("second") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.retry-gate-closed-for = 0.5s - akka.remote.artery.enabled = $artery + pekko.remote.retry-gate-closed-for = 0.5s + pekko.remote.artery.enabled = $artery 
""")).withFallback(RemotingMultiNodeSpec.commonConfig)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeliverySpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeliverySpec.scala index a5d3d8aee4..bb8673026d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeliverySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeliverySpec.scala @@ -25,7 +25,7 @@ class RemoteDeliveryConfig(artery: Boolean) extends MultiNodeConfig { val third = role("third") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.artery.enabled = $artery + pekko.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeploymentDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeploymentDeathWatchSpec.scala index 58e83cbf13..7e9731e61e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeploymentDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteDeploymentDeathWatchSpec.scala @@ -25,10 +25,10 @@ class RemoteDeploymentDeathWatchMultiJvmSpec(artery: Boolean) extends MultiNodeC val third = role("third") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.remote.log-remote-lifecycle-events = off - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.loglevel = INFO + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)).withFallback(RemotingMultiNodeSpec.commonConfig)) deployOn(second, """/hello.remote = "@third@" """) diff --git 
a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala index 351cd236e1..19960adc72 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala @@ -42,9 +42,9 @@ class RemotingFeaturesConfig(val useUnsafe: Boolean, artery: Boolean) extends Mu val iterationCount = 10 protected val baseConfig = ConfigFactory.parseString(s""" - akka.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe - akka.remote.log-remote-lifecycle-events = off - akka.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.artery.enabled = $artery """).withFallback(RemotingMultiNodeSpec.commonConfig) commonConfig(debugConfig(on = false).withFallback(baseConfig)) @@ -354,7 +354,7 @@ abstract class RemotingFeaturesSpec(val multiNodeConfig: RemotingFeaturesConfig) } } - s"Deploy routers with expected behavior if 'akka.remote.use-unsafe-remote-features-outside-cluster=$useUnsafe'" must { + s"Deploy routers with expected behavior if 'pekko.remote.use-unsafe-remote-features-outside-cluster=$useUnsafe'" must { "deployments" in { runOn(first, second, third, fourth) { val deployment1 = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(List("service-hello")) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeDeathWatchSpec.scala index c3bc1651ff..1f79d49bf9 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeDeathWatchSpec.scala @@ -27,12 +27,12 @@ class 
RemoteNodeDeathWatchConfig(artery: Boolean) extends MultiNodeConfig { val third = role("third") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.remote.log-remote-lifecycle-events = off + pekko.loglevel = INFO + pekko.remote.log-remote-lifecycle-events = off ## Use a tighter setting than the default, otherwise it takes 20s for DeathWatch to trigger - akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 3 s - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.watch-failure-detector.acceptable-heartbeat-pause = 3 s + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)).withFallback(RemotingMultiNodeSpec.commonConfig)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeRestartDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeRestartDeathWatchSpec.scala index 72bb3f3184..865b6e2dba 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeRestartDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteNodeRestartDeathWatchSpec.scala @@ -28,12 +28,12 @@ class RemoteNodeRestartDeathWatchConfig(artery: Boolean) extends MultiNodeConfig val second = role("second") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.remote.log-remote-lifecycle-events = off - akka.remote.classic.transport-failure-detector.heartbeat-interval = 1 s - akka.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.loglevel = INFO + pekko.remote.log-remote-lifecycle-events = off + pekko.remote.classic.transport-failure-detector.heartbeat-interval = 1 s + 
pekko.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """))) testTransport(on = true) @@ -114,8 +114,8 @@ abstract class RemoteNodeRestartDeathWatchSpec(multiNodeConfig: RemoteNodeRestar val freshSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${address.port.get} - akka.remote.artery.canonical.port = ${address.port.get} + pekko.remote.classic.netty.tcp.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteQuarantinePiercingSpec.scala index 1500ea8111..580573a8b5 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteQuarantinePiercingSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteQuarantinePiercingSpec.scala @@ -20,12 +20,12 @@ class RemoteQuarantinePiercingConfig(artery: Boolean) extends MultiNodeConfig { val second = role("second") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.remote.log-remote-lifecycle-events = INFO - akka.remote.artery.enabled = $artery + pekko.loglevel = INFO + pekko.remote.log-remote-lifecycle-events = INFO + pekko.remote.artery.enabled = $artery # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)).withFallback(RemotingMultiNodeSpec.commonConfig)) } @@ -116,8 +116,8 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: 
RemoteQuarantinePie val freshSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${address.port.get} - akka.remote.artery.canonical.port = ${address.port.get} + pekko.remote.classic.netty.tcp.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteReDeploymentSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteReDeploymentSpec.scala index bf9b3f4763..6c2b1eab96 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteReDeploymentSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemoteReDeploymentSpec.scala @@ -26,19 +26,19 @@ class RemoteReDeploymentConfig(artery: Boolean) extends MultiNodeConfig { val second = role("second") commonConfig( - debugConfig(on = false).withFallback(ConfigFactory.parseString(s"""akka.remote.classic.transport-failure-detector { + debugConfig(on = false).withFallback(ConfigFactory.parseString(s"""pekko.remote.classic.transport-failure-detector { threshold=0.1 heartbeat-interval=0.1s acceptable-heartbeat-pause=1s } - akka.remote.watch-failure-detector { + pekko.remote.watch-failure-detector { threshold=0.1 heartbeat-interval=0.1s acceptable-heartbeat-pause=2.5s } - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on - akka.loglevel = INFO + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on + pekko.loglevel = INFO """)).withFallback(RemotingMultiNodeSpec.commonConfig)) testTransport(on = true) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemotingMultiNodeSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemotingMultiNodeSpec.scala index a9d651d216..4cd050f36f 
100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemotingMultiNodeSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/RemotingMultiNodeSpec.scala @@ -16,7 +16,7 @@ object RemotingMultiNodeSpec { def commonConfig = ConfigFactory.parseString(s""" - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.warn-about-java-serializer-usage = off """).withFallback(ArterySpecSupport.tlsConfig) // TLS only used if transport=tls-tcp } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/TransportFailSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/TransportFailSpec.scala index 115e2518a3..94ff046685 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/TransportFailSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/TransportFailSpec.scala @@ -30,9 +30,9 @@ object TransportFailConfig extends MultiNodeConfig { val second = role("second") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.remote.use-unsafe-remote-features-outside-cluster = on - akka.remote.classic { + pekko.loglevel = INFO + pekko.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.classic { transport-failure-detector { implementation-class = "org.apache.pekko.remote.TransportFailSpec$$TestFailureDetector" heartbeat-interval = 1 s diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/BenchmarkFileReporter.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/BenchmarkFileReporter.scala index 52dc6503be..f0e4165746 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/BenchmarkFileReporter.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/BenchmarkFileReporter.scala @@ -41,15 +41,15 @@ object BenchmarkFileReporter { "org.apache.pekko.test.LatencySpec.totalMessagesFactor", 
"org.apache.pekko.test.LatencySpec.repeatCount", "org.apache.pekko.test.LatencySpec.real-message", - "akka.remote.artery.enabled", - "akka.remote.artery.advanced.inbound-lanes", - "akka.remote.artery.advanced.buffer-pool-size", - "akka.remote.artery.advanced.aeron.idle-cpu-level", - "akka.remote.artery.advanced.aeron.embedded-media-driver", - "akka.remote.default-remote-dispatcher.throughput", - "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-factor", - "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-min", - "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-max") + "pekko.remote.artery.enabled", + "pekko.remote.artery.advanced.inbound-lanes", + "pekko.remote.artery.advanced.buffer-pool-size", + "pekko.remote.artery.advanced.aeron.idle-cpu-level", + "pekko.remote.artery.advanced.aeron.embedded-media-driver", + "pekko.remote.default-remote-dispatcher.throughput", + "pekko.remote.default-remote-dispatcher.fork-join-executor.parallelism-factor", + "pekko.remote.default-remote-dispatcher.fork-join-executor.parallelism-min", + "pekko.remote.default-remote-dispatcher.fork-join-executor.parallelism-max") apply(test, system, settingsToReport) } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/DirectMemorySpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/DirectMemorySpec.scala index 3421b4ecf8..36e0b0b2ca 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/DirectMemorySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/DirectMemorySpec.scala @@ -22,14 +22,14 @@ object DirectMemorySpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.loglevel = WARNING - akka.remote.log-remote-lifecycle-events = WARNING - akka.remote.artery.enabled = on - akka.remote.artery.large-message-destinations = ["/user/large"] - 
akka.remote.artery.buffer-pool-size = 32 - akka.remote.artery.maximum-frame-size = 256 KiB - akka.remote.artery.large-buffer-pool-size = 4 - akka.remote.artery.maximum-large-frame-size = 2 MiB + pekko.loglevel = WARNING + pekko.remote.log-remote-lifecycle-events = WARNING + pekko.remote.artery.enabled = on + pekko.remote.artery.large-message-destinations = ["/user/large"] + pekko.remote.artery.buffer-pool-size = 32 + pekko.remote.artery.maximum-frame-size = 256 KiB + pekko.remote.artery.large-buffer-pool-size = 4 + pekko.remote.artery.maximum-large-frame-size = 2 MiB """)) .withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -71,8 +71,8 @@ abstract class DirectMemorySpec extends MultiNodeSpec(DirectMemorySpec) with STM "Direct memory allocation" should { "not cause OutOfMemoryError" in within(10.seconds) { // twice the buffer pool size - val nrOfRegularMessages = 2 * system.settings.config.getInt("akka.remote.artery.buffer-pool-size") - val nrOfLargeMessages = 2 * system.settings.config.getInt("akka.remote.artery.large-buffer-pool-size") + val nrOfRegularMessages = 2 * system.settings.config.getInt("pekko.remote.artery.buffer-pool-size") + val nrOfLargeMessages = 2 * system.settings.config.getInt("pekko.remote.artery.large-buffer-pool-size") val large = system.actorOf(Props(classOf[CountingEcho], testActor, nrOfLargeMessages), "large") val regular = system.actorOf(Props(classOf[CountingEcho], testActor, nrOfRegularMessages), "regular") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/FanInThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/FanInThrougputSpec.scala index 0595da77e2..d9c3e87671 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/FanInThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/FanInThrougputSpec.scala @@ -35,7 +35,7 @@ object FanInThroughputSpec extends MultiNodeConfig { 
org.apache.pekko.test.FanInThroughputSpec.totalMessagesFactor = 10.0 org.apache.pekko.test.FanInThroughputSpec.real-message = off org.apache.pekko.test.FanInThroughputSpec.actor-selection = off - akka.remote.artery.advanced { + pekko.remote.artery.advanced { # inbound-lanes = 4 } """)).withFallback(MaxThroughputSpec.cfg).withFallback(RemotingMultiNodeSpec.commonConfig)) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/HandshakeRestartReceiverSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/HandshakeRestartReceiverSpec.scala index 8249076b04..a0441776ab 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/HandshakeRestartReceiverSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/HandshakeRestartReceiverSpec.scala @@ -26,7 +26,7 @@ object HandshakeRestartReceiverSpec extends MultiNodeConfig { val second = role("second") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka { + pekko { loglevel = INFO actor.provider = remote remote.artery { @@ -34,8 +34,8 @@ object HandshakeRestartReceiverSpec extends MultiNodeConfig { } } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """))) class Subject extends Actor { @@ -124,7 +124,7 @@ abstract class HandshakeRestartReceiverSpec val freshSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject2") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/LatencySpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/LatencySpec.scala index 3eae30dc36..bce07cd2af 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/LatencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/LatencySpec.scala @@ -35,7 +35,7 @@ object LatencySpec extends MultiNodeConfig { org.apache.pekko.test.LatencySpec.totalMessagesFactor = 1.0 org.apache.pekko.test.LatencySpec.repeatCount = 1 org.apache.pekko.test.LatencySpec.real-message = off - akka { + pekko { loglevel = ERROR # avoid TestEventListener loggers = ["org.apache.pekko.event.Logging$$DefaultLogger"] diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/MaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/MaxThroughputSpec.scala index 086f236a61..89f76f1f8d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/MaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/MaxThroughputSpec.scala @@ -32,7 +32,7 @@ object MaxThroughputSpec extends MultiNodeConfig { org.apache.pekko.test.MaxThroughputSpec.totalMessagesFactor = 160.0 org.apache.pekko.test.MaxThroughputSpec.real-message = off org.apache.pekko.test.MaxThroughputSpec.actor-selection = off - akka { + pekko { loglevel = INFO log-dead-letters = 100 # avoid TestEventListener @@ -72,7 +72,7 @@ object MaxThroughputSpec extends MultiNodeConfig { } } } - akka.remote.default-remote-dispatcher { + pekko.remote.default-remote-dispatcher { fork-join-executor { # parallelism-factor = 0.5 parallelism-min = 4 @@ -108,7 +108,7 @@ object MaxThroughputSpec extends MultiNodeConfig { } def receiverProps(reporter: RateReporter, payloadSize: Int, numSenders: Int): Props = - Props(new Receiver(reporter, payloadSize, numSenders)).withDispatcher("akka.remote.default-remote-dispatcher") + Props(new Receiver(reporter, payloadSize, 
numSenders)).withDispatcher("pekko.remote.default-remote-dispatcher") class Receiver(reporter: RateReporter, payloadSize: Int, numSenders: Int) extends Actor { private var c = 0L diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/RemoteRestartedQuarantinedSpec.scala index 21bd3bf600..d7b4fd5cfb 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/RemoteRestartedQuarantinedSpec.scala @@ -24,12 +24,12 @@ object RemoteRestartedQuarantinedSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.loglevel = WARNING - akka.remote.log-remote-lifecycle-events = WARNING - akka.remote.artery.enabled = on + pekko.loglevel = WARNING + pekko.remote.log-remote-lifecycle-events = WARNING + pekko.remote.artery.enabled = on # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """)) .withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -119,7 +119,7 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo val freshSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) val probe = TestProbe()(freshSystem) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala index ee6a29b77f..1dc6d16cfb 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala @@ -24,8 +24,8 @@ object SurviveInboundStreamRestartWithCompressionInFlightSpec extends MultiNodeC commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.remote.artery { + pekko.loglevel = INFO + pekko.remote.artery { enabled = on advanced { inbound-lanes = 4 diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveNetworkPartitionSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveNetworkPartitionSpec.scala index 39e662a7ff..03fe0a28fe 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveNetworkPartitionSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/SurviveNetworkPartitionSpec.scala @@ -25,10 +25,10 @@ object SurviveNetworkPartitionSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.remote.artery.enabled = on - akka.remote.artery.advanced.give-up-system-message-after = 4s - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.loglevel = INFO + pekko.remote.artery.enabled = on + pekko.remote.artery.advanced.give-up-system-message-after = 4s + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)) .withFallback(RemotingMultiNodeSpec.commonConfig)) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamConcistencySpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamConcistencySpec.scala index da55338e61..5d2777db31 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamConcistencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamConcistencySpec.scala @@ -34,7 +34,7 @@ object AeronStreamConsistencySpec extends MultiNodeConfig { val barrierTimeout = 5.minutes commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka { + pekko { loglevel = INFO actor { provider = remote @@ -62,7 +62,7 @@ abstract class AeronStreamConsistencySpec Aeron.connect(ctx) } - val idleCpuLevel = system.settings.config.getInt("akka.remote.artery.advanced.aeron.idle-cpu-level") + val idleCpuLevel = system.settings.config.getInt("pekko.remote.artery.advanced.aeron.idle-cpu-level") val taskRunner = { val r = new TaskRunner(system.asInstanceOf[ExtendedActorSystem], idleCpuLevel) r.start() diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamLatencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamLatencySpec.scala index c7607f5722..46e91096f1 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamLatencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamLatencySpec.scala @@ -45,7 +45,7 @@ object AeronStreamLatencySpec extends MultiNodeConfig { # for serious measurements you should increase the totalMessagesFactor (10) and repeatCount (3) org.apache.pekko.test.AeronStreamLatencySpec.totalMessagesFactor = 1.0 org.apache.pekko.test.AeronStreamLatencySpec.repeatCount = 1 - akka { + pekko { loglevel = INFO testconductor.barrier-timeout = ${barrierTimeout.toSeconds}s actor { @@ -96,7 +96,7 @@ abstract class AeronStreamLatencySpec Aeron.connect(ctx) } - val 
idleCpuLevel = system.settings.config.getInt("akka.remote.artery.advanced.aeron.idle-cpu-level") + val idleCpuLevel = system.settings.config.getInt("pekko.remote.artery.advanced.aeron.idle-cpu-level") val taskRunner = { val r = new TaskRunner(system.asInstanceOf[ExtendedActorSystem], idleCpuLevel) r.start() diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala index b55e30ab93..fcaf41a89b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala @@ -35,7 +35,7 @@ object AeronStreamMaxThroughputSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (20) org.apache.pekko.test.AeronStreamMaxThroughputSpec.totalMessagesFactor = 1.0 - akka { + pekko { loglevel = ERROR testconductor.barrier-timeout = ${barrierTimeout.toSeconds}s actor { @@ -91,7 +91,7 @@ abstract class AeronStreamMaxThroughputSpec Aeron.connect(ctx) } - val idleCpuLevel = system.settings.config.getInt("akka.remote.artery.advanced.aeron.idle-cpu-level") + val idleCpuLevel = system.settings.config.getInt("pekko.remote.artery.advanced.aeron.idle-cpu-level") val taskRunner = { val r = new TaskRunner(system.asInstanceOf[ExtendedActorSystem], idleCpuLevel) r.start() diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteGatePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteGatePiercingSpec.scala index 2584d5372c..c940d719f0 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteGatePiercingSpec.scala +++ 
b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteGatePiercingSpec.scala @@ -26,15 +26,15 @@ object RemoteGatePiercingSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false).withFallback( ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.remote.artery.enabled = false - akka.remote.classic.log-remote-lifecycle-events = INFO - akka.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 5 s + pekko.loglevel = INFO + pekko.remote.artery.enabled = false + pekko.remote.classic.log-remote-lifecycle-events = INFO + pekko.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 5 s """))) - nodeConfig(first)(ConfigFactory.parseString("akka.remote.classic.retry-gate-closed-for = 1 d # Keep it long")) + nodeConfig(first)(ConfigFactory.parseString("pekko.remote.classic.retry-gate-closed-for = 1 d # Keep it long")) - nodeConfig(second)(ConfigFactory.parseString("akka.remote.classic.retry-gate-closed-for = 1 s # Keep it short")) + nodeConfig(second)(ConfigFactory.parseString("pekko.remote.classic.retry-gate-closed-for = 1 s # Keep it short")) testTransport(on = true) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeRestartGateSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeRestartGateSpec.scala index 60952d1236..8d56813b9d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeRestartGateSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeRestartGateSpec.scala @@ -26,10 +26,10 @@ object RemoteNodeRestartGateSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.remote.artery.enabled = off - akka.loglevel = INFO - akka.remote.classic.log-remote-lifecycle-events = INFO - akka.remote.classic.retry-gate-closed-for = 1d # Keep it long + pekko.remote.artery.enabled = off + 
pekko.loglevel = INFO + pekko.remote.classic.log-remote-lifecycle-events = INFO + pekko.remote.classic.retry-gate-closed-for = 1d # Keep it long """))) testTransport(on = true) @@ -102,8 +102,8 @@ abstract class RemoteNodeRestartGateSpec extends RemotingMultiNodeSpec(RemoteNod val freshSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.retry-gate-closed-for = 0.5 s - akka.remote.classic.netty.tcp { + pekko.remote.retry-gate-closed-for = 0.5 s + pekko.remote.classic.netty.tcp { hostname = ${address.host.get} port = ${address.port.get} } diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeShutdownAndComesBackSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeShutdownAndComesBackSpec.scala index a00df33971..576cc27b71 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeShutdownAndComesBackSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/RemoteNodeShutdownAndComesBackSpec.scala @@ -25,14 +25,14 @@ object RemoteNodeShutdownAndComesBackSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false).withFallback( ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.remote.artery.enabled = off - akka.remote.classic.log-remote-lifecycle-events = INFO + pekko.loglevel = INFO + pekko.remote.artery.enabled = off + pekko.remote.classic.log-remote-lifecycle-events = INFO ## Keep it tight, otherwise reestablishing a connection takes too much time - akka.remote.classic.transport-failure-detector.heartbeat-interval = 1 s - akka.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s - akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 60 s - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.classic.transport-failure-detector.heartbeat-interval = 1 s + pekko.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 
3 s + pekko.remote.watch-failure-detector.acceptable-heartbeat-pause = 60 s + pekko.remote.use-unsafe-remote-features-outside-cluster = on """))) testTransport(on = true) @@ -140,8 +140,8 @@ abstract class RemoteNodeShutdownAndComesBackSpec extends RemotingMultiNodeSpec( val freshSystem = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${address.port.get} - akka.remote.artery.canonical.port = ${address.port.get} + pekko.remote.classic.netty.tcp.port = ${address.port.get} + pekko.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/Ticket15109Spec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/Ticket15109Spec.scala index 381d65ea0d..c393f0896e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/Ticket15109Spec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/classic/Ticket15109Spec.scala @@ -26,13 +26,13 @@ object Ticket15109Spec extends MultiNodeConfig { commonConfig( debugConfig(on = false).withFallback( ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.remote.artery.enabled = off - akka.remote.classic.log-remote-lifecycle-events = INFO + pekko.loglevel = INFO + pekko.remote.artery.enabled = off + pekko.remote.classic.log-remote-lifecycle-events = INFO ## Keep it tight, otherwise reestablishing a connection takes too much time - akka.remote.classic.transport-failure-detector.heartbeat-interval = 1 s - akka.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s - akka.remote.classic.retry-gate-closed-for = 0.5 s + pekko.remote.classic.transport-failure-detector.heartbeat-interval = 1 s + pekko.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s + pekko.remote.classic.retry-gate-closed-for = 0.5 s """))) 
testTransport(on = true) diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRandomSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRandomSpec.scala index e9e8d927cf..a6c5f1c041 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRandomSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRandomSpec.scala @@ -29,8 +29,8 @@ class RemoteRandomConfig(artery: Boolean) extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)).withFallback(RemotingMultiNodeSpec.commonConfig)) deployOnAll(""" diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRoundRobinSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRoundRobinSpec.scala index 8a4da71af0..c8734dc224 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRoundRobinSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteRoundRobinSpec.scala @@ -26,8 +26,8 @@ class RemoteRoundRobinConfig(artery: Boolean) extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)).withFallback(RemotingMultiNodeSpec.commonConfig)) deployOnAll(""" diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteScatterGatherSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteScatterGatherSpec.scala index 41d8d9fb70..74370ea49d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteScatterGatherSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/routing/RemoteScatterGatherSpec.scala @@ -29,8 +29,8 @@ class RemoteScatterGatherConfig(artery: Boolean) extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" - akka.remote.artery.enabled = $artery - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.artery.enabled = $artery + pekko.remote.use-unsafe-remote-features-outside-cluster = on """)).withFallback(RemotingMultiNodeSpec.commonConfig)) deployOnAll(""" diff --git a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/testconductor/TestConductorSpec.scala index a192af8630..06d523454b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/org/apache/pekko/remote/testconductor/TestConductorSpec.scala @@ -18,7 +18,7 @@ import pekko.testkit.LongRunningTest object TestConductorMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.remote.artery.enabled = false + pekko.remote.artery.enabled = false """)).withFallback(RemotingMultiNodeSpec.commonConfig)) val leader = role("leader") diff --git a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/artery/ArteryFailedToBindSpec.scala b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/artery/ArteryFailedToBindSpec.scala index 65bd5e6af9..d506c199e9 100644 --- a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/artery/ArteryFailedToBindSpec.scala +++ 
b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/artery/ArteryFailedToBindSpec.scala @@ -20,14 +20,14 @@ class ArteryFailedToBindSpec extends AnyWordSpec with Matchers { "an ActorSystem" must { "not start if port is taken" in { - // this test is tweaked in Jenkins CI by passing -Dakka.remote.artery.transport + // this test is tweaked in Jenkins CI by passing -Dpekko.remote.artery.transport // therefore we must decide whether to use UDP or not based on the runtime config - val arterySettings = ArterySettings(ConfigFactory.load().getConfig("akka.remote.artery")) + val arterySettings = ArterySettings(ConfigFactory.load().getConfig("pekko.remote.artery")) val useUdp = arterySettings.Transport == ArterySettings.AeronUpd val port = SocketUtil.temporaryLocalPort(useUdp) val config = ConfigFactory.parseString(s""" - |akka { + |pekko { | actor { | provider = remote | } diff --git a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/classic/RemotingFailedToBindSpec.scala b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/classic/RemotingFailedToBindSpec.scala index 7ee83be718..98a72dd5d3 100644 --- a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/classic/RemotingFailedToBindSpec.scala +++ b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/classic/RemotingFailedToBindSpec.scala @@ -19,7 +19,7 @@ class RemotingFailedToBindSpec extends AnyWordSpec with Matchers { "not start if port is taken" in { val port = SocketUtil.temporaryLocalPort() val config = ConfigFactory.parseString(s""" - |akka { + |pekko { | actor { | provider = remote | } diff --git a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/BarrierSpec.scala index ecd1ca2128..c136a33ad4 100644 --- a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/BarrierSpec.scala +++ 
b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/BarrierSpec.scala @@ -17,10 +17,10 @@ import pekko.testkit.{ AkkaSpec, EventFilter, ImplicitSender, TestProbe, TimingT object BarrierSpec { final case class Failed(ref: ActorRef, thr: Throwable) val config = """ - akka.testconductor.barrier-timeout = 5s - akka.actor.provider = remote - akka.actor.debug.fsm = on - akka.actor.debug.lifecycle = on + pekko.testconductor.barrier-timeout = 5s + pekko.actor.provider = remote + pekko.actor.debug.fsm = on + pekko.actor.debug.lifecycle = on """ } diff --git a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/ControllerSpec.scala index a7d43a0e5a..9d272e32b4 100644 --- a/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/ControllerSpec.scala +++ b/akka-remote-tests/src/test/scala/org/apache/pekko/remote/testconductor/ControllerSpec.scala @@ -14,10 +14,10 @@ import org.apache.pekko.testkit.ImplicitSender object ControllerSpec { val config = """ - akka.testconductor.barrier-timeout = 5s - akka.actor.provider = remote - akka.actor.debug.fsm = on - akka.actor.debug.lifecycle = on + pekko.testconductor.barrier-timeout = 5s + pekko.actor.provider = remote + pekko.actor.debug.fsm = on + pekko.actor.debug.lifecycle = on """ } diff --git a/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala b/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala index 5a297d8130..9609eacdaa 100644 --- a/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala +++ b/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala @@ -12,7 +12,7 @@ import org.scalatest.tools.StandardOutReporter class QuietReporter(inColor: Boolean, withDurations: Boolean = false) extends StandardOutReporter(withDurations, inColor, false, true, false, false, false, false, false, false, false) { - def 
this() = this(!getBoolean("akka.test.nocolor"), !getBoolean("akka.test.nodurations")) + def this() = this(!getBoolean("pekko.test.nocolor"), !getBoolean("pekko.test.nodurations")) override def apply(event: Event): Unit = event match { case _: RunStarting => () diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 174fe14e1c..773d3a67ce 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -1,32 +1,32 @@ #//#shared ##################################### -# Akka Remote Reference Config File # +# Pekko Remote Reference Config File # ##################################### # This is the reference config file that contains all the default settings. # Make your edits/overrides in your application.conf. -# comments about akka.actor settings left out where they are already in akka- +# comments about pekko.actor settings left out where they are already in pekko- # actor.jar, because otherwise they would be repeated in config rendering. # # For the configuration of the new remoting implementation (Artery) please look # at the bottom section of this file as it is listed separately. 
-akka { +pekko { actor { serializers { - akka-containers = "org.apache.pekko.remote.serialization.MessageContainerSerializer" - akka-misc = "org.apache.pekko.remote.serialization.MiscMessageSerializer" + pekko-containers = "org.apache.pekko.remote.serialization.MessageContainerSerializer" + pekko-misc = "org.apache.pekko.remote.serialization.MiscMessageSerializer" artery = "org.apache.pekko.remote.serialization.ArteryMessageSerializer" proto = "org.apache.pekko.remote.serialization.ProtobufSerializer" daemon-create = "org.apache.pekko.remote.serialization.DaemonMsgCreateSerializer" - akka-system-msg = "org.apache.pekko.remote.serialization.SystemMessageSerializer" + pekko-system-msg = "org.apache.pekko.remote.serialization.SystemMessageSerializer" } serialization-bindings { - "org.apache.pekko.actor.ActorSelectionMessage" = akka-containers + "org.apache.pekko.actor.ActorSelectionMessage" = pekko-containers "org.apache.pekko.remote.DaemonMsgCreate" = daemon-create @@ -35,7 +35,7 @@ akka { # Since org.apache.pekko.protobuf.Message does not extend Serializable but # GeneratedMessage does, need to use the more specific one here in order # to avoid ambiguity. 
- # This is only loaded if akka-protobuf is on the classpath + # This is only loaded if pekko-protobuf is on the classpath # It should not be used and users should migrate to using the protobuf classes # directly # Remove in 2.7 @@ -51,56 +51,56 @@ akka { "com.google.protobuf.GeneratedMessage" = proto "com.google.protobuf.GeneratedMessageV3" = proto - "org.apache.pekko.actor.Identify" = akka-misc - "org.apache.pekko.actor.ActorIdentity" = akka-misc - "scala.Some" = akka-misc - "scala.None$" = akka-misc - "java.util.Optional" = akka-misc - "org.apache.pekko.actor.Status$Success" = akka-misc - "org.apache.pekko.actor.Status$Failure" = akka-misc - "org.apache.pekko.actor.ActorRef" = akka-misc - "org.apache.pekko.actor.PoisonPill$" = akka-misc - "org.apache.pekko.actor.Kill$" = akka-misc - "org.apache.pekko.remote.RemoteWatcher$Heartbeat$" = akka-misc - "org.apache.pekko.remote.RemoteWatcher$HeartbeatRsp" = akka-misc - "org.apache.pekko.Done" = akka-misc - "org.apache.pekko.NotUsed" = akka-misc - "org.apache.pekko.actor.Address" = akka-misc - "org.apache.pekko.remote.UniqueAddress" = akka-misc + "org.apache.pekko.actor.Identify" = pekko-misc + "org.apache.pekko.actor.ActorIdentity" = pekko-misc + "scala.Some" = pekko-misc + "scala.None$" = pekko-misc + "java.util.Optional" = pekko-misc + "org.apache.pekko.actor.Status$Success" = pekko-misc + "org.apache.pekko.actor.Status$Failure" = pekko-misc + "org.apache.pekko.actor.ActorRef" = pekko-misc + "org.apache.pekko.actor.PoisonPill$" = pekko-misc + "org.apache.pekko.actor.Kill$" = pekko-misc + "org.apache.pekko.remote.RemoteWatcher$Heartbeat$" = pekko-misc + "org.apache.pekko.remote.RemoteWatcher$HeartbeatRsp" = pekko-misc + "org.apache.pekko.Done" = pekko-misc + "org.apache.pekko.NotUsed" = pekko-misc + "org.apache.pekko.actor.Address" = pekko-misc + "org.apache.pekko.remote.UniqueAddress" = pekko-misc - "org.apache.pekko.actor.ActorInitializationException" = akka-misc - "org.apache.pekko.actor.IllegalActorStateException" 
= akka-misc - "org.apache.pekko.actor.ActorKilledException" = akka-misc - "org.apache.pekko.actor.InvalidActorNameException" = akka-misc - "org.apache.pekko.actor.InvalidMessageException" = akka-misc - "java.util.concurrent.TimeoutException" = akka-misc - "org.apache.pekko.remote.serialization.ThrowableNotSerializableException" = akka-misc + "org.apache.pekko.actor.ActorInitializationException" = pekko-misc + "org.apache.pekko.actor.IllegalActorStateException" = pekko-misc + "org.apache.pekko.actor.ActorKilledException" = pekko-misc + "org.apache.pekko.actor.InvalidActorNameException" = pekko-misc + "org.apache.pekko.actor.InvalidMessageException" = pekko-misc + "java.util.concurrent.TimeoutException" = pekko-misc + "org.apache.pekko.remote.serialization.ThrowableNotSerializableException" = pekko-misc - "org.apache.pekko.actor.LocalScope$" = akka-misc - "org.apache.pekko.remote.RemoteScope" = akka-misc + "org.apache.pekko.actor.LocalScope$" = pekko-misc + "org.apache.pekko.remote.RemoteScope" = pekko-misc - "com.typesafe.config.impl.SimpleConfig" = akka-misc - "com.typesafe.config.Config" = akka-misc + "com.typesafe.config.impl.SimpleConfig" = pekko-misc + "com.typesafe.config.Config" = pekko-misc - "org.apache.pekko.routing.FromConfig" = akka-misc - "org.apache.pekko.routing.DefaultResizer" = akka-misc - "org.apache.pekko.routing.BalancingPool" = akka-misc - "org.apache.pekko.routing.BroadcastGroup" = akka-misc - "org.apache.pekko.routing.BroadcastPool" = akka-misc - "org.apache.pekko.routing.RandomGroup" = akka-misc - "org.apache.pekko.routing.RandomPool" = akka-misc - "org.apache.pekko.routing.RoundRobinGroup" = akka-misc - "org.apache.pekko.routing.RoundRobinPool" = akka-misc - "org.apache.pekko.routing.ScatterGatherFirstCompletedGroup" = akka-misc - "org.apache.pekko.routing.ScatterGatherFirstCompletedPool" = akka-misc - "org.apache.pekko.routing.SmallestMailboxPool" = akka-misc - "org.apache.pekko.routing.TailChoppingGroup" = akka-misc - 
"org.apache.pekko.routing.TailChoppingPool" = akka-misc - "org.apache.pekko.remote.routing.RemoteRouterConfig" = akka-misc + "org.apache.pekko.routing.FromConfig" = pekko-misc + "org.apache.pekko.routing.DefaultResizer" = pekko-misc + "org.apache.pekko.routing.BalancingPool" = pekko-misc + "org.apache.pekko.routing.BroadcastGroup" = pekko-misc + "org.apache.pekko.routing.BroadcastPool" = pekko-misc + "org.apache.pekko.routing.RandomGroup" = pekko-misc + "org.apache.pekko.routing.RandomPool" = pekko-misc + "org.apache.pekko.routing.RoundRobinGroup" = pekko-misc + "org.apache.pekko.routing.RoundRobinPool" = pekko-misc + "org.apache.pekko.routing.ScatterGatherFirstCompletedGroup" = pekko-misc + "org.apache.pekko.routing.ScatterGatherFirstCompletedPool" = pekko-misc + "org.apache.pekko.routing.SmallestMailboxPool" = pekko-misc + "org.apache.pekko.routing.TailChoppingGroup" = pekko-misc + "org.apache.pekko.routing.TailChoppingPool" = pekko-misc + "org.apache.pekko.remote.routing.RemoteRouterConfig" = pekko-misc - "org.apache.pekko.pattern.StatusReply" = akka-misc + "org.apache.pekko.pattern.StatusReply" = pekko-misc - "org.apache.pekko.dispatch.sysmsg.SystemMessage" = akka-system-msg + "org.apache.pekko.dispatch.sysmsg.SystemMessage" = pekko-system-msg # Java Serializer is by default used for exceptions and will by default # not be allowed to be serialized, but in certain cases they are replaced @@ -110,7 +110,7 @@ akka { # - when wrapped in system messages for exceptions from remote deployed child actors # # It's recommended that you implement custom serializer for exceptions that are - # sent remotely, You can add binding to akka-misc (MiscMessageSerializer) for the + # sent remotely, You can add binding to pekko-misc (MiscMessageSerializer) for the # exceptions that have a constructor with single message String or constructor with # message String as first parameter and cause Throwable as second parameter. 
Note that it's not # safe to add this binding for general exceptions such as IllegalArgumentException @@ -127,13 +127,13 @@ akka { "org.apache.pekko.remote.serialization.SystemMessageSerializer" = 22 - # deprecated in 2.6.0, moved to akka-actor + # deprecated in 2.6.0, moved to pekko-actor "org.apache.pekko.remote.serialization.LongSerializer" = 18 - # deprecated in 2.6.0, moved to akka-actor + # deprecated in 2.6.0, moved to pekko-actor "org.apache.pekko.remote.serialization.IntSerializer" = 19 - # deprecated in 2.6.0, moved to akka-actor + # deprecated in 2.6.0, moved to pekko-actor "org.apache.pekko.remote.serialization.StringSerializer" = 20 - # deprecated in 2.6.0, moved to akka-actor + # deprecated in 2.6.0, moved to pekko-actor "org.apache.pekko.remote.serialization.ByteStringSerializer" = 21 } @@ -241,13 +241,13 @@ akka { # If true, will only allow specific classes listed in `allowed-actor-classes` to be instanciated on this # system via remote deployment - enable-allow-list = ${akka.remote.deployment.enable-whitelist} + enable-allow-list = ${pekko.remote.deployment.enable-whitelist} # deprecated, use `allowed-actor-classes` whitelist = [] - allowed-actor-classes = ${akka.remote.deployment.whitelist} + allowed-actor-classes = ${pekko.remote.deployment.whitelist} } ### Default dispatcher for the remoting subsystem @@ -266,7 +266,7 @@ akka { } -akka { +pekko { remote { #//#classic @@ -280,7 +280,7 @@ akka { # that since remoting can load arbitrary 3rd party drivers (see # "enabled-transport" and "adapters" entries) it is not guaranteed that # every module will respect this setting. - use-dispatcher = "akka.remote.default-remote-dispatcher" + use-dispatcher = "pekko.remote.default-remote-dispatcher" # Settings for the failure detector to monitor connections. # For TCP it is not important to have fast failure detection, since @@ -335,7 +335,7 @@ akka { command-ack-timeout = 30 s # The timeout for outbound associations to perform the handshake. 
- # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl + # If the transport is pekko.remote.classic.netty.tcp or pekko.remote.classic.netty.ssl # the configured connection-timeout for the transport will be used instead. handshake-timeout = 15 s @@ -354,15 +354,15 @@ akka { ### Logging - # If this is "on", Akka will log all inbound messages at DEBUG level, + # If this is "on", Pekko will log all inbound messages at DEBUG level, # if off then they are not logged log-received-messages = off - # If this is "on", Akka will log all outbound messages at DEBUG level, + # If this is "on", Pekko will log all outbound messages at DEBUG level, # if off then they are not logged log-sent-messages = off - # Sets the log granularity level at which Akka logs remoting events. This setting + # Sets the log granularity level at which Pekko logs remoting events. This setting # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility # reasons the setting "on" will default to "debug" level. Please note that the effective # logging level is still determined by the global logging level of the actor system: @@ -466,7 +466,7 @@ akka { # pointing to an implementation class of the Transport interface. # If multiple transports are provided, the address of the first # one will be used as a default address. - enabled-transports = ["akka.remote.classic.netty.tcp"] + enabled-transports = ["pekko.remote.classic.netty.tcp"] # Transport drivers can be augmented with adapters by adding their # name to the applied-adapters setting in the configuration of a @@ -493,7 +493,7 @@ akka { # name to the applied-adapters list. The last adapter in the # list is the adapter immediately above the driver, while # the first one is the top of the stack below the standard - # Akka protocol + # Pekko protocol applied-adapters = [] # The default remote server port clients should connect to. 
@@ -507,41 +507,41 @@ akka { # Use this setting to bind a network interface to a different port # than remoting protocol expects messages at. This may be used - # when running akka nodes in a separated networks (under NATs or docker containers). + # when running pekko nodes in a separated networks (under NATs or docker containers). # Use 0 if you want a random available port. Examples: # - # akka.remote.classic.netty.tcp.port = 2552 - # akka.remote.classic.netty.tcp.bind-port = 2553 + # pekko.remote.classic.netty.tcp.port = 2552 + # pekko.remote.classic.netty.tcp.bind-port = 2553 # Network interface will be bound to the 2553 port, but remoting protocol will # expect messages sent to port 2552. # - # akka.remote.classic.netty.tcp.port = 0 - # akka.remote.classic.netty.tcp.bind-port = 0 + # pekko.remote.classic.netty.tcp.port = 0 + # pekko.remote.classic.netty.tcp.bind-port = 0 # Network interface will be bound to a random port, and remoting protocol will # expect messages sent to the bound port. # - # akka.remote.classic.netty.tcp.port = 2552 - # akka.remote.classic.netty.tcp.bind-port = 0 + # pekko.remote.classic.netty.tcp.port = 2552 + # pekko.remote.classic.netty.tcp.bind-port = 0 # Network interface will be bound to a random port, but remoting protocol will # expect messages sent to port 2552. # - # akka.remote.classic.netty.tcp.port = 0 - # akka.remote.classic.netty.tcp.bind-port = 2553 + # pekko.remote.classic.netty.tcp.port = 0 + # pekko.remote.classic.netty.tcp.bind-port = 2553 # Network interface will be bound to the 2553 port, and remoting protocol will # expect messages sent to the bound port. # - # akka.remote.classic.netty.tcp.port = 2552 - # akka.remote.classic.netty.tcp.bind-port = "" + # pekko.remote.classic.netty.tcp.port = 2552 + # pekko.remote.classic.netty.tcp.bind-port = "" # Network interface will be bound to the 2552 port, and remoting protocol will # expect messages sent to the bound port. 
# - # akka.remote.classic.netty.tcp.port if empty + # pekko.remote.classic.netty.tcp.port if empty bind-port = "" # Use this setting to bind a network interface to a different hostname or ip # than remoting protocol expects messages at. # Use "0.0.0.0" to bind to all interfaces. - # akka.remote.classic.netty.tcp.hostname if empty + # pekko.remote.classic.netty.tcp.hostname if empty bind-hostname = "" # Enables SSL support on this transport @@ -555,7 +555,7 @@ akka { # will be used to accept inbound connections, and perform IO. If "" then # dedicated threads will be used. # Please note that the Netty driver only uses this configuration and does - # not read the "akka.remote.use-dispatcher" entry. Instead it has to be + # not read the "pekko.remote.use-dispatcher" entry. Instead it has to be # configured manually to point to the same dispatcher if needed. use-dispatcher-for-io = "" @@ -632,7 +632,7 @@ akka { } - netty.ssl = ${akka.remote.classic.netty.tcp} + netty.ssl = ${pekko.remote.classic.netty.tcp} netty.ssl = { # Enable SSL/TLS encryption. # This must be enabled on both the client and server to work. @@ -642,7 +642,7 @@ akka { # Must implement org.apache.pekko.remote.transport.netty.SSLEngineProvider and have a public # constructor with an ActorSystem parameter. # The default ConfigSSLEngineProvider is configured by properties in section - # akka.remote.classic.netty.ssl.security + # pekko.remote.classic.netty.ssl.security # # The SSLEngineProvider can also be defined via ActorSystemSetup with # SSLEngineProviderSetup when starting the ActorSystem. That is useful when @@ -700,13 +700,13 @@ akka { # To prevent man-in-the-middle attacks this setting is enabled by default. # # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that - # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting + # run on older versions of pekko-remote. 
This is because in versions of Pekko < 2.4.12 the active side of the remoting # connection will not send over certificates even if asked. # - # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side) + # However, starting with Pekko 2.4.12, even with this setting "off", the active side (TLS client side) # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of - # Akka < 2.4.12 can therefore work like this: - # - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off" + # Pekko < 2.4.12 can therefore work like this: + # - upgrade all nodes to a Pekko version >= 2.4.12, in the best case the latest version, but keep this setting at "off" # - then switch this flag to "on" and do again a rolling upgrade of all nodes # The first step ensures that all nodes will send over a certificate when asked to. The second # step will ensure that all nodes finally enforce the secure checking of client certificates. @@ -736,7 +736,7 @@ akka { #//#classic #//#artery -akka { +pekko { remote { @@ -749,7 +749,7 @@ akka { # Select the underlying transport implementation. # # Possible values: aeron-udp, tcp, tls-tcp - # See https://doc.akka.io/docs/akka/current/remoting-artery.html#selecting-a-transport for the tradeoffs + # See https://pekko.apache.org/docs/pekko/current/remoting-artery.html#selecting-a-transport for the tradeoffs # for each transport transport = tcp @@ -771,7 +771,7 @@ akka { # Use these settings to bind a network interface to a different address - # than artery expects messages at. This may be used when running Akka + # than artery expects messages at. This may be used when running Pekko # nodes in a separated networks (under NATs or in containers). If canonical # and bind addresses are different, then network configuration that relays # communications from canonical to bind addresses is expected.
@@ -780,14 +780,14 @@ akka { # Port to bind a network interface to. Can be set to a port number # of one of the following special values: # 0 random available port - # "" akka.remote.artery.canonical.port + # "" pekko.remote.artery.canonical.port # port = "" # Hostname to bind a network interface to. Can be set to an ip, hostname # or one of the following special values: # "0.0.0.0" all interfaces - # "" akka.remote.artery.canonical.hostname + # "" pekko.remote.artery.canonical.hostname # "" InetAddress.getLocalHost.getHostAddress # "" InetAddress.getLocalHost.getHostName # @@ -854,7 +854,7 @@ akka { buffer-pool-size = 128 # Maximum serialized message size for the large messages, including header data. - # If the value of akka.remote.artery.transport is set to aeron-udp, it is currently + # If the value of pekko.remote.artery.transport is set to aeron-udp, it is currently # restricted to 1/8th the size of a term buffer that can be configured by setting the # 'aeron.term.buffer.length' system property. # See 'large-message-destinations'. @@ -869,21 +869,21 @@ akka { # collected, which is not as efficient as reusing buffers in the pool. large-buffer-pool-size = 32 - # For enabling testing features, such as blackhole in akka-remote-testkit. + # For enabling testing features, such as blackhole in pekko-remote-testkit. test-mode = off # Settings for the materializer that is used for the remote streams. - materializer = ${akka.stream.materializer} + materializer = ${pekko.stream.materializer} # Remoting will use the given dispatcher for the ordinary and large message # streams. - use-dispatcher = "akka.remote.default-remote-dispatcher" + use-dispatcher = "pekko.remote.default-remote-dispatcher" # Remoting will use the given dispatcher for the control stream. # It can be good to not use the same dispatcher for the control stream as # the dispatcher for the ordinary message stream so that heartbeat messages # are not disturbed. 
- use-control-stream-dispatcher = "akka.actor.internal-dispatcher" + use-control-stream-dispatcher = "pekko.actor.internal-dispatcher" # Total number of inbound lanes, shared among all inbound associations. A value @@ -1055,7 +1055,7 @@ akka { # It's only called from the stage, so if it dosn't delegate to any shared instance # it doesn't have to be thread-safe. # Refer to `org.apache.pekko.remote.artery.RemoteInstrument` for more information. - instruments = ${?akka.remote.artery.advanced.instruments} [] + instruments = ${?pekko.remote.artery.advanced.instruments} [] # Only used when transport is aeron-udp aeron { @@ -1131,7 +1131,7 @@ akka { # Must implement org.apache.pekko.remote.artery.tcp.SSLEngineProvider and have a public # constructor with an ActorSystem parameter. # The default ConfigSSLEngineProvider is configured by properties in section - # akka.remote.artery.ssl.config-ssl-engine + # pekko.remote.artery.ssl.config-ssl-engine ssl-engine-provider = org.apache.pekko.remote.artery.tcp.ConfigSSLEngineProvider # Config of org.apache.pekko.remote.artery.tcp.ConfigSSLEngineProvider @@ -1205,27 +1205,27 @@ akka { # By default mTLS is enabled. # This provider also includes a verification phase that runs after the TLS handshake # phase. In this verification, both peers run an authorization and verify they are - # part of the same akka cluster. The verification happens via comparing the subject + # part of the same pekko cluster. The verification happens via comparing the subject # names in the peer's certificate with the name on the own certificate so if you # use this SSLEngineProvider you should make sure all nodes on the cluster include # at least one common subject name (CN or SAN). # The Key setup this implementation supports has some limitations: # 1. the private key must be provided on a PKCS#1 or a non-encrypted PKCS#8 PEM-formatted file - # 2. the private key must be be of an algorythm supported by `akka-pki` tools (e.g. "RSA", not "EC") + # 2. 
the private key must be of an algorithm supported by `pekko-pki` tools (e.g. "RSA", not "EC") # 3. the node certificate must be issued by a root CA (not an intermediate CA) # 4. both the node and the CA certificates must be provided in PEM-formatted files rotating-keys-engine { # This is a convention that people may follow if they wish to save themselves some configuration - secret-mount-point = /var/run/secrets/akka-tls/rotating-keys-engine + secret-mount-point = /var/run/secrets/pekko-tls/rotating-keys-engine # The absolute path the PEM file with the private key. - key-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.key + key-file = ${pekko.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.key # The absolute path to the PEM file of the certificate for the private key above. - cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.crt + cert-file = ${pekko.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.crt # The absolute path to the PEM file of the certificate of the CA that emited # the node certificate above. 
- ca-cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/ca.crt + ca-cert-file = ${pekko.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/ca.crt # There are two options, and the default SecureRandom is recommended: # "" or "SecureRandom" => (default) diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/Endpoint.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/Endpoint.scala index 25c3119ab0..f34545a77d 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/Endpoint.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/Endpoint.scala @@ -105,7 +105,7 @@ private[remote] class DefaultMessageDispatcher( log.debug( LogMarker.Security, "operating in UntrustedMode, dropping inbound actor selection to [{}], " + - "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration", + "allow it by adding the path to 'pekko.remote.trusted-selection-paths' configuration", sel.elements.mkString("/", "/", "")) else // run the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor @@ -640,7 +640,7 @@ private[remote] class EndpointWriter( private val markLog = Logging.withMarker(this) val extendedSystem: ExtendedActorSystem = context.system.asInstanceOf[ExtendedActorSystem] val remoteMetrics = RemoteMetricsExtension(extendedSystem) - val backoffDispatcher = context.system.dispatchers.lookup("akka.remote.classic.backoff-remote-dispatcher") + val backoffDispatcher = context.system.dispatchers.lookup("pekko.remote.classic.backoff-remote-dispatcher") var reader: Option[ActorRef] = None var handle: Option[AkkaProtocolHandle] = handleOrActive @@ -1188,7 +1188,7 @@ private[remote] class EndpointReader( if (log.isWarningEnabled) log.warning( "Discarding inbound message to [{}] in read-only association to [{}]. 
" + - "If this happens often you may consider using akka.remote.use-passive-connections=off " + + "If this happens often you may consider using pekko.remote.use-passive-connections=off " + "or use Artery TCP.", msgOption.map(_.recipient).getOrElse("unknown"), remoteAddress) diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteActorRefProvider.scala index 083239b777..fb06746c0d 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteActorRefProvider.scala @@ -338,7 +338,7 @@ private[pekko] class RemoteActorRefProvider( if (!settings.HasCluster) { if (remoteSettings.UseUnsafeRemoteFeaturesWithoutCluster) log.info( - "Akka Cluster not in use - enabling unsafe features anyway because `akka.remote.use-unsafe-remote-features-outside-cluster` has been enabled.") + "Akka Cluster not in use - enabling unsafe features anyway because `pekko.remote.use-unsafe-remote-features-outside-cluster` has been enabled.") else log.warning("Akka Cluster not in use - Using Akka Cluster is recommended if you need remote watch and deploy.") } @@ -349,7 +349,7 @@ private[pekko] class RemoteActorRefProvider( /** * Logs if deathwatch message is intentionally dropped. To disable - * warnings set `akka.remote.warn-unsafe-watch-outside-cluster` to `off` + * warnings set `pekko.remote.warn-unsafe-watch-outside-cluster` to `off` * or use Akka Cluster. 
*/ private[pekko] def warnIfUnsafeDeathwatchWithoutCluster(watchee: ActorRef, watcher: ActorRef, action: String): Unit = diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteDaemon.scala index ee28275625..28652b60f7 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteDaemon.scala @@ -74,11 +74,11 @@ private[pekko] class RemoteSystemDaemon( private val parent2children = new ConcurrentHashMap[ActorRef, Set[ActorRef]] - private val allowListEnabled = system.settings.config.getBoolean("akka.remote.deployment.enable-allow-list") + private val allowListEnabled = system.settings.config.getBoolean("pekko.remote.deployment.enable-allow-list") private val remoteDeploymentAllowList: immutable.Set[String] = { import pekko.util.ccompat.JavaConverters._ if (allowListEnabled) - system.settings.config.getStringList("akka.remote.deployment.allowed-actor-classes").asScala.toSet + system.settings.config.getStringList("pekko.remote.deployment.allowed-actor-classes").asScala.toSet else Set.empty } diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteSettings.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteSettings.scala index e74f15cd1c..3e08ec9b1a 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteSettings.scala @@ -26,24 +26,24 @@ final class RemoteSettings(val config: Config) { import pekko.util.ccompat.JavaConverters._ - val Artery = ArterySettings(getConfig("akka.remote.artery")) + val Artery = ArterySettings(getConfig("pekko.remote.artery")) - val WarnAboutDirectUse: Boolean = getBoolean("akka.remote.warn-about-direct-use") + val WarnAboutDirectUse: Boolean = getBoolean("pekko.remote.warn-about-direct-use") @deprecated("Classic remoting is deprecated, use Artery", 
"2.6.0") - val LogReceive: Boolean = getBoolean("akka.remote.classic.log-received-messages") + val LogReceive: Boolean = getBoolean("pekko.remote.classic.log-received-messages") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") - val LogSend: Boolean = getBoolean("akka.remote.classic.log-sent-messages") + val LogSend: Boolean = getBoolean("pekko.remote.classic.log-sent-messages") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val LogFrameSizeExceeding: Option[Int] = { - if (config.getString("akka.remote.classic.log-frame-size-exceeding").toLowerCase == "off") None - else Some(getBytes("akka.remote.classic.log-frame-size-exceeding").toInt) + if (config.getString("pekko.remote.classic.log-frame-size-exceeding").toLowerCase == "off") None + else Some(getBytes("pekko.remote.classic.log-frame-size-exceeding").toInt) } @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") - val UntrustedMode: Boolean = getBoolean("akka.remote.classic.untrusted-mode") + val UntrustedMode: Boolean = getBoolean("pekko.remote.classic.untrusted-mode") /** * INTERNAL API @@ -53,11 +53,11 @@ final class RemoteSettings(val config: Config) { if (Artery.Enabled) Artery.UntrustedMode else UntrustedMode @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val TrustedSelectionPaths: Set[String] = - immutableSeq(getStringList("akka.remote.classic.trusted-selection-paths")).toSet + immutableSeq(getStringList("pekko.remote.classic.trusted-selection-paths")).toSet @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val RemoteLifecycleEventsLogLevel: LogLevel = toRootLowerCase( - getString("akka.remote.classic.log-remote-lifecycle-events")) match { + getString("pekko.remote.classic.log-remote-lifecycle-events")) match { case "on" => Logging.DebugLevel case other => Logging.levelFor(other) match { @@ -68,7 +68,7 @@ final class RemoteSettings(val config: Config) { } @deprecated("Classic remoting is deprecated, use Artery", 
"2.6.0") - val Dispatcher: String = getString("akka.remote.classic.use-dispatcher") + val Dispatcher: String = getString("pekko.remote.classic.use-dispatcher") @nowarn("msg=deprecated") def configureDispatcher(props: Props): Props = @@ -80,35 +80,35 @@ final class RemoteSettings(val config: Config) { @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val ShutdownTimeout: Timeout = { - Timeout(config.getMillisDuration("akka.remote.classic.shutdown-timeout")) + Timeout(config.getMillisDuration("pekko.remote.classic.shutdown-timeout")) }.requiring(_.duration > Duration.Zero, "shutdown-timeout must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val FlushWait: FiniteDuration = { - config.getMillisDuration("akka.remote.classic.flush-wait-on-shutdown") + config.getMillisDuration("pekko.remote.classic.flush-wait-on-shutdown") }.requiring(_ > Duration.Zero, "flush-wait-on-shutdown must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val StartupTimeout: Timeout = { - Timeout(config.getMillisDuration("akka.remote.classic.startup-timeout")) + Timeout(config.getMillisDuration("pekko.remote.classic.startup-timeout")) }.requiring(_.duration > Duration.Zero, "startup-timeout must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val RetryGateClosedFor: FiniteDuration = { - config.getMillisDuration("akka.remote.classic.retry-gate-closed-for") + config.getMillisDuration("pekko.remote.classic.retry-gate-closed-for") }.requiring(_ >= Duration.Zero, "retry-gate-closed-for must be >= 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") - val UsePassiveConnections: Boolean = getBoolean("akka.remote.classic.use-passive-connections") + val UsePassiveConnections: Boolean = getBoolean("pekko.remote.classic.use-passive-connections") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val BackoffPeriod: FiniteDuration = { - 
config.getMillisDuration("akka.remote.classic.backoff-interval") + config.getMillisDuration("pekko.remote.classic.backoff-interval") }.requiring(_ > Duration.Zero, "backoff-interval must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val LogBufferSizeExceeding: Int = { - val key = "akka.remote.classic.log-buffer-size-exceeding" + val key = "pekko.remote.classic.log-buffer-size-exceeding" config.getString(key).toLowerCase match { case "off" | "false" => Int.MaxValue case _ => config.getInt(key) @@ -117,32 +117,32 @@ final class RemoteSettings(val config: Config) { @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val SysMsgAckTimeout: FiniteDuration = { - config.getMillisDuration("akka.remote.classic.system-message-ack-piggyback-timeout") + config.getMillisDuration("pekko.remote.classic.system-message-ack-piggyback-timeout") }.requiring(_ > Duration.Zero, "system-message-ack-piggyback-timeout must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val SysResendTimeout: FiniteDuration = { - config.getMillisDuration("akka.remote.classic.resend-interval") + config.getMillisDuration("pekko.remote.classic.resend-interval") }.requiring(_ > Duration.Zero, "resend-interval must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val SysResendLimit: Int = { - config.getInt("akka.remote.classic.resend-limit") + config.getInt("pekko.remote.classic.resend-limit") }.requiring(_ > 0, "resend-limit must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val SysMsgBufferSize: Int = { - getInt("akka.remote.classic.system-message-buffer-size") + getInt("pekko.remote.classic.system-message-buffer-size") }.requiring(_ > 0, "system-message-buffer-size must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val InitialSysMsgDeliveryTimeout: FiniteDuration = { - 
config.getMillisDuration("akka.remote.classic.initial-system-message-delivery-timeout") + config.getMillisDuration("pekko.remote.classic.initial-system-message-delivery-timeout") }.requiring(_ > Duration.Zero, "initial-system-message-delivery-timeout must be > 0") @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val QuarantineSilentSystemTimeout: FiniteDuration = { - val key = "akka.remote.classic.quarantine-after-silence" + val key = "pekko.remote.classic.quarantine-after-silence" config.getString(key).toLowerCase match { case "off" | "false" => Duration.Zero case _ => @@ -153,21 +153,21 @@ final class RemoteSettings(val config: Config) { @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val QuarantineDuration: FiniteDuration = { config - .getMillisDuration("akka.remote.classic.prune-quarantine-marker-after") + .getMillisDuration("pekko.remote.classic.prune-quarantine-marker-after") .requiring(_ > Duration.Zero, "prune-quarantine-marker-after must be > 0 ms") } @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") val CommandAckTimeout: Timeout = { - Timeout(config.getMillisDuration("akka.remote.classic.command-ack-timeout")) + Timeout(config.getMillisDuration("pekko.remote.classic.command-ack-timeout")) }.requiring(_.duration > Duration.Zero, "command-ack-timeout must be > 0") val UseUnsafeRemoteFeaturesWithoutCluster: Boolean = getBoolean( - "akka.remote.use-unsafe-remote-features-outside-cluster") + "pekko.remote.use-unsafe-remote-features-outside-cluster") - val WarnUnsafeWatchWithoutCluster: Boolean = getBoolean("akka.remote.warn-unsafe-watch-outside-cluster") + val WarnUnsafeWatchWithoutCluster: Boolean = getBoolean("pekko.remote.warn-unsafe-watch-outside-cluster") - val WatchFailureDetectorConfig: Config = getConfig("akka.remote.watch-failure-detector") + val WatchFailureDetectorConfig: Config = getConfig("pekko.remote.watch-failure-detector") val WatchFailureDetectorImplementationClass: String = 
WatchFailureDetectorConfig.getString("implementation-class") val WatchHeartBeatInterval: FiniteDuration = { WatchFailureDetectorConfig.getMillisDuration("heartbeat-interval") @@ -188,10 +188,10 @@ final class RemoteSettings(val config: Config) { } @deprecated("Classic remoting is deprecated, use Artery", "2.6.0") - val Adapters: Map[String, String] = configToMap(getConfig("akka.remote.classic.adapters")) + val Adapters: Map[String, String] = configToMap(getConfig("pekko.remote.classic.adapters")) private def transportNames: immutable.Seq[String] = - immutableSeq(getStringList("akka.remote.classic.enabled-transports")) + immutableSeq(getStringList("pekko.remote.classic.enabled-transports")) private def transportConfigFor(transportName: String): Config = getConfig(transportName) diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteWatcher.scala index e3372eac55..76d74b00d6 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteWatcher.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/RemoteWatcher.scala @@ -208,7 +208,7 @@ private[pekko] class RemoteWatcher( } /** - * Returns true if either has cluster or `akka.remote.use-unsafe-remote-features-outside-cluster` + * Returns true if either has cluster or `pekko.remote.use-unsafe-remote-features-outside-cluster` * is enabled. Can be overridden when using RemoteWatcher as a superclass. 
*/ protected def shouldWatch(@unused watchee: InternalActorRef): Boolean = { diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/Remoting.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/Remoting.scala index cb9d956a44..90e37d3a58 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/Remoting.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/Remoting.scala @@ -181,7 +181,7 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc if (!flushSuccessful) log.warning( "Shutdown finished, but flushing might not have been successful and some messages might have been dropped. " + - "Increase akka.remote.flush-wait-on-shutdown to a larger value to avoid this.") + "Increase pekko.remote.flush-wait-on-shutdown to a larger value to avoid this.") finalize() case Failure(e) => diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/ArterySettings.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/ArterySettings.scala index 0176c8e104..dca2dd1629 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/ArterySettings.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/ArterySettings.scala @@ -29,7 +29,7 @@ private[pekko] final class ArterySettings private (config: Config) { import config._ def withDisabledCompression(): ArterySettings = - ArterySettings(ConfigFactory.parseString("""|akka.remote.artery.advanced.compression { + ArterySettings(ConfigFactory.parseString("""|pekko.remote.artery.advanced.compression { | actor-refs.max = off | manifests.max = off |}""".stripMargin).withFallback(config)) diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/Handshake.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/Handshake.scala index 7280b94305..0657ef4d41 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/Handshake.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/Handshake.scala 
@@ -281,7 +281,7 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl "Dropping Handshake Request from [{}] addressed to unknown local address [{}]. " + "Local address is [{}]. Check that the sending system uses the same " + "address to contact recipient system as defined in the " + - "'akka.remote.artery.canonical.hostname' of the recipient system. " + + "'pekko.remote.artery.canonical.hostname' of the recipient system. " + "The name of the ActorSystem must also match.", from, to, diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/MessageDispatcher.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/MessageDispatcher.scala index 69e43015e8..c12e372d90 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/MessageDispatcher.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/MessageDispatcher.scala @@ -67,7 +67,7 @@ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: R log.debug( LogMarker.Security, "operating in UntrustedMode, dropping inbound actor selection to [{}], " + - "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration", + "allow it by adding the path to 'pekko.remote.trusted-selection-paths' configuration", sel.elements.mkString("/", "/", "")) } else // run the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/RemoteInstrument.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/RemoteInstrument.scala index 100f417dc9..d68b70844f 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/RemoteInstrument.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/RemoteInstrument.scala @@ -404,7 +404,7 @@ private[remote] object RemoteInstruments { @InternalStableApi def create(system: ExtendedActorSystem, @unused log: LoggingAdapter): 
Vector[RemoteInstrument] = { val c = system.settings.config - val path = "akka.remote.artery.advanced.instruments" + val path = "pekko.remote.artery.advanced.instruments" import pekko.util.ccompat.JavaConverters._ val configuredInstruments = c .getStringList(path) diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ConfigSSLEngineProvider.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ConfigSSLEngineProvider.scala index 79faee21bc..388fcc90c0 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ConfigSSLEngineProvider.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ConfigSSLEngineProvider.scala @@ -31,7 +31,7 @@ import javax.net.ssl.TrustManagerFactory import scala.util.Try /** - * Config in akka.remote.artery.ssl.config-ssl-engine + * Config in pekko.remote.artery.ssl.config-ssl-engine * * Subclass may override protected methods to replace certain parts, such as key and trust manager. */ @@ -40,7 +40,7 @@ class ConfigSSLEngineProvider(protected val config: Config, protected val log: M def this(system: ActorSystem) = this( - system.settings.config.getConfig("akka.remote.artery.ssl.config-ssl-engine"), + system.settings.config.getConfig("pekko.remote.artery.ssl.config-ssl-engine"), Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName)) private val sslEngineConfig = new SSLEngineConfig(config) diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala index 929209cd88..c7885c0894 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala @@ -51,7 +51,7 @@ final class RotatingKeysSSLEngineProvider(val config: Config, protected val log: def 
this(system: ActorSystem) = this( - system.settings.config.getConfig("akka.remote.artery.ssl.rotating-keys-engine"), + system.settings.config.getConfig("pekko.remote.artery.ssl.rotating-keys-engine"), Logging.withMarker(system, classOf[RotatingKeysSSLEngineProvider].getName)) // read config diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/serialization/ProtobufSerializer.scala index b783b59dbd..eb06fe7ab0 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/serialization/ProtobufSerializer.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/serialization/ProtobufSerializer.scala @@ -50,7 +50,7 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer private val allowedClassNames: Set[String] = { import org.apache.pekko.util.ccompat.JavaConverters._ - system.settings.config.getStringList("akka.serialization.protobuf.allowed-classes").asScala.toSet + system.settings.config.getStringList("pekko.serialization.protobuf.allowed-classes").asScala.toSet } // This must lazy otherwise it will deadlock the ActorSystem creation @@ -114,8 +114,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer if (!isInAllowList(clazz)) { val warnMsg = s"Can't deserialize object of type [${clazz.getName}] in [${getClass.getName}]. " + "Only classes that are on the allow list are allowed for security reasons. 
" + - "Configure allowed classes with akka.actor.serialization-bindings or " + - "akka.serialization.protobuf.allowed-classes" + "Configure allowed classes with pekko.actor.serialization-bindings or " + + "pekko.serialization.protobuf.allowed-classes" log.warning(LogMarker.Security, warnMsg) throw new IllegalArgumentException(warnMsg) } @@ -131,7 +131,7 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer * * If an old class is removed from `serialization-bindings` when it's not used for serialization * but still used for deserialization (e.g. rolling update with serialization changes) it can - * be allowed by specifying in `akka.protobuf.allowed-classes`. + * be allowed by specifying in `pekko.protobuf.allowed-classes`. * * That is also possible when changing a binding from a ProtobufSerializer to another serializer (e.g. Jackson) * and still bind with the same class (interface). diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/transport/AkkaProtocolTransport.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/transport/AkkaProtocolTransport.scala index 355421ffdf..02d544a39e 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/transport/AkkaProtocolTransport.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/transport/AkkaProtocolTransport.scala @@ -41,7 +41,7 @@ private[remote] class AkkaProtocolSettings(config: Config) { import org.apache.pekko.util.Helpers.ConfigOps - val TransportFailureDetectorConfig: Config = getConfig("akka.remote.classic.transport-failure-detector") + val TransportFailureDetectorConfig: Config = getConfig("pekko.remote.classic.transport-failure-detector") val TransportFailureDetectorImplementationClass: String = TransportFailureDetectorConfig.getString("implementation-class") val TransportHeartBeatInterval: FiniteDuration = { @@ -49,14 +49,14 @@ private[remote] class AkkaProtocolSettings(config: Config) { }.requiring(_ > Duration.Zero, 
"transport-failure-detector.heartbeat-interval must be > 0") val HandshakeTimeout: FiniteDuration = { - val enabledTransports = config.getStringList("akka.remote.classic.enabled-transports") - if (enabledTransports.contains("akka.remote.classic.netty.tcp")) - config.getMillisDuration("akka.remote.classic.netty.tcp.connection-timeout") - else if (enabledTransports.contains("akka.remote.classic.netty.ssl")) - config.getMillisDuration("akka.remote.classic.netty.ssl.connection-timeout") + val enabledTransports = config.getStringList("pekko.remote.classic.enabled-transports") + if (enabledTransports.contains("pekko.remote.classic.netty.tcp")) + config.getMillisDuration("pekko.remote.classic.netty.tcp.connection-timeout") + else if (enabledTransports.contains("pekko.remote.classic.netty.ssl")) + config.getMillisDuration("pekko.remote.classic.netty.ssl.connection-timeout") else config - .getMillisDuration("akka.remote.classic.handshake-timeout") + .getMillisDuration("pekko.remote.classic.handshake-timeout") .requiring(_ > Duration.Zero, "handshake-timeout must be > 0") } } diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/transport/FailureInjectorTransportAdapter.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/transport/FailureInjectorTransportAdapter.scala index d0573014e8..6ef67f43f6 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/transport/FailureInjectorTransportAdapter.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/transport/FailureInjectorTransportAdapter.scala @@ -71,7 +71,7 @@ private[remote] class FailureInjectorTransportAdapter( private def rng = ThreadLocalRandom.current() private val log = Logging(extendedSystem, classOf[FailureInjectorTransportAdapter]) - private val shouldDebugLog: Boolean = extendedSystem.settings.config.getBoolean("akka.remote.classic.gremlin.debug") + private val shouldDebugLog: Boolean = extendedSystem.settings.config.getBoolean("pekko.remote.classic.gremlin.debug") @volatile 
private var upstreamListener: Option[AssociationEventListener] = None private[transport] val addressChaosTable = new ConcurrentHashMap[Address, GremlinMode]() diff --git a/akka-remote/src/main/scala/org/apache/pekko/remote/transport/netty/SSLEngineProvider.scala b/akka-remote/src/main/scala/org/apache/pekko/remote/transport/netty/SSLEngineProvider.scala index 4ed042336e..08049aa182 100644 --- a/akka-remote/src/main/scala/org/apache/pekko/remote/transport/netty/SSLEngineProvider.scala +++ b/akka-remote/src/main/scala/org/apache/pekko/remote/transport/netty/SSLEngineProvider.scala @@ -38,7 +38,7 @@ trait SSLEngineProvider { } /** - * Config in akka.remote.classic.netty.ssl.security + * Config in pekko.remote.classic.netty.ssl.security * * Subclass may override protected methods to replace certain parts, such as key and trust manager. */ @@ -49,7 +49,7 @@ class ConfigSSLEngineProvider(protected val log: MarkerLoggingAdapter, private v def this(system: ActorSystem) = this( Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName), - new SSLSettings(system.settings.config.getConfig("akka.remote.classic.netty.ssl.security"))) + new SSLSettings(system.settings.config.getConfig("pekko.remote.classic.netty.ssl.security"))) import settings._ diff --git a/akka-remote/src/test/resources/aeron.properties b/akka-remote/src/test/resources/aeron.properties index b38ed1d356..a80556583b 100644 --- a/akka-remote/src/test/resources/aeron.properties +++ b/akka-remote/src/test/resources/aeron.properties @@ -16,7 +16,7 @@ aeron.threading.mode=SHARED_NETWORK #aeron.sender.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy #aeron.receiver.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy -# use same directory in akka.remote.artery.advanced.aeron.aeron-dir config +# use same directory in pekko.remote.artery.advanced.aeron.aeron-dir config # of the Akka application aeron.dir=target/aeron # on linux, use directory on ram disk, instead diff --git 
a/akka-remote/src/test/scala-jdk9-only/org/apache/pekko/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala b/akka-remote/src/test/scala-jdk9-only/org/apache/pekko/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala index 056c0c67bd..74f3a63a01 100644 --- a/akka-remote/src/test/scala-jdk9-only/org/apache/pekko/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala +++ b/akka-remote/src/test/scala-jdk9-only/org/apache/pekko/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala @@ -29,7 +29,7 @@ class JFRRemotingFlightRecorderSpec extends AkkaSpec { "JFRRemotingFlightRecorderSpec-2", ConfigFactory.parseString( """ - akka.java-flight-recorder.enabled = false + pekko.java-flight-recorder.enabled = false """)) try { val extension = RemotingFlightRecorder(system) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/AccrualFailureDetectorSpec.scala index cbbd8be182..71b620dd85 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/AccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/AccrualFailureDetectorSpec.scala @@ -14,7 +14,7 @@ import pekko.remote.FailureDetector.Clock import pekko.testkit.AkkaSpec @nowarn -class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { +class AccrualFailureDetectorSpec extends AkkaSpec("pekko.loglevel = INFO") { "An AccrualFailureDetector" must { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/DaemonicSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/DaemonicSpec.scala index 2b6d682052..05d5a997e6 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/DaemonicSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/DaemonicSpec.scala @@ -26,12 +26,12 @@ class DaemonicSpec extends AkkaSpec { val daemonicSystem = ActorSystem( "daemonic", ConfigFactory.parseString(""" - akka.daemonic = on - akka.actor.provider = remote - 
akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.log-dead-letters-during-shutdown = off - #akka.remote.artery.advanced.aeron.idle-cpu = 5 + pekko.daemonic = on + pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.log-dead-letters-during-shutdown = off + #pekko.remote.artery.advanced.aeron.idle-cpu = 5 """)) try { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/FailureDetectorRegistrySpec.scala index 63e8275f04..a907a148fc 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/FailureDetectorRegistrySpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/FailureDetectorRegistrySpec.scala @@ -10,7 +10,7 @@ import org.apache.pekko import pekko.remote.FailureDetector.Clock import pekko.testkit.AkkaSpec -class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { +class FailureDetectorRegistrySpec extends AkkaSpec("pekko.loglevel = INFO") { def fakeTimeGenerator(timeIntervals: Seq[Long]): Clock = new Clock { @volatile var times = diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/LogSourceSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/LogSourceSpec.scala index 8d19abc080..24cbe11ab3 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/LogSourceSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/LogSourceSpec.scala @@ -25,9 +25,9 @@ object LogSourceSpec { } class LogSourceSpec extends AkkaSpec(""" - akka.loglevel = INFO - akka.actor.provider = remote - akka.remote.classic.netty.tcp.port = 0 + pekko.loglevel = INFO + pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 """) { import LogSourceSpec._ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/MessageLoggingSpec.scala 
b/akka-remote/src/test/scala/org/apache/pekko/remote/MessageLoggingSpec.scala index c893b2ffc2..06b7da421d 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/MessageLoggingSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/MessageLoggingSpec.scala @@ -16,9 +16,9 @@ import pekko.testkit.{ AkkaSpec, ImplicitSender, TestKit } object MessageLoggingSpec { def config(artery: Boolean) = ConfigFactory.parseString(s""" - akka.loglevel = info // debug makes this test fail intentionally - akka.actor.provider = remote - akka.remote { + pekko.loglevel = info // debug makes this test fail intentionally + pekko.actor.provider = remote + pekko.remote { classic { log-received-messages = on diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteActorMailboxSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteActorMailboxSpec.scala index cfe19166c8..897dea3713 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteActorMailboxSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteActorMailboxSpec.scala @@ -10,4 +10,4 @@ import org.apache.pekko.actor.ActorMailboxSpec class RemoteActorMailboxSpec extends ActorMailboxSpec( - ConfigFactory.parseString("""akka.actor.provider = remote""").withFallback(ActorMailboxSpec.mailboxConf)) {} + ConfigFactory.parseString("""pekko.actor.provider = remote""").withFallback(ActorMailboxSpec.mailboxConf)) {} diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConfigSpec.scala index 0c8470f743..a2071eb27e 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConfigSpec.scala @@ -18,8 +18,8 @@ import pekko.util.Helpers.ConfigOps @nowarn // classic deprecated class RemoteConfigSpec extends AkkaSpec(""" - akka.actor.provider = remote - akka.remote.classic.netty.tcp.port = 0 + 
pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 """) { "Remoting" should { @@ -36,7 +36,7 @@ class RemoteConfigSpec extends AkkaSpec(""" FlushWait should ===(2 seconds) StartupTimeout.duration should ===(10 seconds) RetryGateClosedFor should ===(5 seconds) - Dispatcher should ===("akka.remote.default-remote-dispatcher") + Dispatcher should ===("pekko.remote.default-remote-dispatcher") UsePassiveConnections should ===(true) BackoffPeriod should ===(5 millis) LogBufferSizeExceeding should ===(50000) @@ -64,7 +64,7 @@ class RemoteConfigSpec extends AkkaSpec(""" WatchFailureDetectorConfig.getMillisDuration("acceptable-heartbeat-pause") should ===(10 seconds) WatchFailureDetectorConfig.getMillisDuration("min-std-deviation") should ===(100 millis) - remoteSettings.config.getString("akka.remote.classic.log-frame-size-exceeding") should ===("off") + remoteSettings.config.getString("pekko.remote.classic.log-frame-size-exceeding") should ===("off") } "be able to parse AkkaProtocol related config elements" in { @@ -78,7 +78,7 @@ class RemoteConfigSpec extends AkkaSpec(""" } "contain correct netty.tcp values in reference.conf" in { - val c = RARP(system).provider.remoteSettings.config.getConfig("akka.remote.classic.netty.tcp") + val c = RARP(system).provider.remoteSettings.config.getConfig("pekko.remote.classic.netty.tcp") val s = new NettyTransportSettings(c) import s._ @@ -102,7 +102,7 @@ class RemoteConfigSpec extends AkkaSpec(""" } "contain correct socket worker pool configuration values in reference.conf" in { - val c = RARP(system).provider.remoteSettings.config.getConfig("akka.remote.classic.netty.tcp") + val c = RARP(system).provider.remoteSettings.config.getConfig("pekko.remote.classic.netty.tcp") // server-socket-worker-pool { @@ -124,7 +124,7 @@ class RemoteConfigSpec extends AkkaSpec(""" } "contain correct ssl configuration values in reference.conf" in { - val sslSettings = new 
SSLSettings(system.settings.config.getConfig("akka.remote.classic.netty.ssl.security")) + val sslSettings = new SSLSettings(system.settings.config.getConfig("pekko.remote.classic.netty.ssl.security")) sslSettings.SSLKeyStore should ===("keystore") sslSettings.SSLKeyStorePassword should ===("changeme") sslSettings.SSLKeyPassword should ===("changeme") @@ -137,7 +137,7 @@ class RemoteConfigSpec extends AkkaSpec(""" } "have debug logging of the failure injector turned off in reference.conf" in { - val c = RARP(system).provider.remoteSettings.config.getConfig("akka.remote.classic.gremlin") + val c = RARP(system).provider.remoteSettings.config.getConfig("pekko.remote.classic.gremlin") c.getBoolean("debug") should ===(false) } } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConsistentHashingRouterSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConsistentHashingRouterSpec.scala index a15a060944..e105468742 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConsistentHashingRouterSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteConsistentHashingRouterSpec.scala @@ -13,8 +13,8 @@ import pekko.testkit.AkkaSpec class RemoteConsistentHashingRouterSpec extends AkkaSpec(""" - akka.remote.artery.canonical.port = 0 - akka.actor.provider = remote """) { + pekko.remote.artery.canonical.port = 0 + pekko.actor.provider = remote """) { "ConsistentHashingGroup" must { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteDeployerSpec.scala index 4fab305dae..1e5b7e9502 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteDeployerSpec.scala @@ -15,8 +15,8 @@ import pekko.testkit._ object RemoteDeployerSpec { val deployerConf = ConfigFactory.parseString( """ - akka.actor.provider = remote - akka.actor.deployment 
{ + pekko.actor.provider = remote + pekko.actor.deployment { /service2 { router = round-robin-pool nr-of-instances = 3 @@ -24,7 +24,7 @@ object RemoteDeployerSpec { dispatcher = mydispatcher } } - akka.remote.classic.netty.tcp.port = 0 + pekko.remote.classic.netty.tcp.port = 0 """, ConfigParseOptions.defaults) @@ -38,7 +38,7 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) { "A RemoteDeployer" must { - "be able to parse 'akka.actor.deployment._' with specified remote nodes" in { + "be able to parse 'pekko.actor.deployment._' with specified remote nodes" in { val service = "/service2" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala index 16a5fcfcaa..d55e1cad02 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteFeaturesSpec.scala @@ -34,10 +34,10 @@ object RemoteFeaturesSpec { // string config to pass into `ArteryMultiNodeSpec.extraConfig: Option[String]` for `other` system def common(useUnsafe: Boolean): String = s""" - akka.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe - akka.remote.artery.enabled = on - akka.remote.artery.canonical.port = 0 - akka.log-dead-letters-during-shutdown = off + pekko.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe + pekko.remote.artery.enabled = on + pekko.remote.artery.canonical.port = 0 + pekko.log-dead-letters-during-shutdown = off """ def disabled: Config = @@ -140,7 +140,7 @@ class RemoteFeaturesDisabledSpec extends RemoteFeaturesSpec(RemoteFeaturesSpec.d val masterSystem = newRemoteSystem( name = Some("RS2"), extraConfig = Some(s""" - akka.actor.deployment { + pekko.actor.deployment { /$actorName.remote = "akka://${system.name}@localhost:$port" 
"/parent*/*".remote = "akka://${system.name}@localhost:$port" } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteRouterSpec.scala index 0dbfccf163..6c7fd4e266 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/RemoteRouterSpec.scala @@ -25,17 +25,17 @@ object RemoteRouterSpec { } class RemoteRouterSpec extends AkkaSpec(s""" - akka.actor.provider = remote - akka.remote.use-unsafe-remote-features-outside-cluster = on - akka.remote.classic.netty.tcp { + pekko.actor.provider = remote + pekko.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.classic.netty.tcp { hostname = localhost port = 0 } - akka.remote.artery.canonical { + pekko.remote.artery.canonical { hostname = "localhost" port = 0 } - akka.actor.deployment { + pekko.actor.deployment { /remote-override { router = round-robin-pool nr-of-instances = 4 @@ -59,7 +59,7 @@ class RemoteRouterSpec extends AkkaSpec(s""" if (RARP(system).provider.remoteSettings.Artery.Enabled) "akka" else "akka.tcp" val conf = ConfigFactory.parseString(s""" - akka { + pekko { actor.deployment { /blub { router = round-robin-pool diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978CommunicationSpec.scala index 6c341abc93..72b3ceff76 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978CommunicationSpec.scala @@ -32,7 +32,7 @@ object Configuration { private val trustStore = getClass.getClassLoader.getResource("truststore").getPath private val keyStore = getClass.getClassLoader.getResource("keystore").getPath private val conf = """ - akka { + pekko { actor.provider = remote test { single-expect-default = 10s @@ -42,7 
+42,7 @@ object Configuration { remote.artery.enabled = off - remote.classic.enabled-transports = ["akka.remote.classic.netty.ssl"] + remote.classic.enabled-transports = ["pekko.remote.classic.netty.ssl"] remote.classic.netty.ssl { hostname = localhost @@ -60,8 +60,8 @@ object Configuration { } } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """ final case class CipherConfig( @@ -84,7 +84,7 @@ object Configuration { val fullConfig = config .withFallback(AkkaSpec.testConf) .withFallback(ConfigFactory.load) - .getConfig("akka.remote.classic.netty.ssl.security") + .getConfig("pekko.remote.classic.netty.ssl.security") val settings = new SSLSettings(fullConfig) val sslEngineProvider = new ConfigSSLEngineProvider(NoMarkerLogging, settings) @@ -135,7 +135,7 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) lazy val other: ActorSystem = ActorSystem( "remote-sys", ConfigFactory - .parseString("akka.remote.classic.netty.ssl.port = " + cipherConfig.remotePort) + .parseString("pekko.remote.classic.netty.ssl.port = " + cipherConfig.remotePort) .withFallback(system.settings.config)) override def afterTermination(): Unit = { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978ConfigSpec.scala index cda92cfa8a..45ed11dc4f 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/Ticket1978ConfigSpec.scala @@ -9,14 +9,14 @@ import pekko.remote.transport.netty.SSLSettings import pekko.testkit._ class Ticket1978ConfigSpec extends AkkaSpec(""" - akka.remote.classic.netty.ssl.security { + pekko.remote.classic.netty.ssl.security { random-number-generator = 
"SecureRandom" } """) with ImplicitSender with DefaultTimeout { "SSL Remoting" must { "be able to parse these extra Netty config elements" in { - val settings = new SSLSettings(system.settings.config.getConfig("akka.remote.classic.netty.ssl.security")) + val settings = new SSLSettings(system.settings.config.getConfig("pekko.remote.classic.netty.ssl.security")) settings.SSLKeyStore should ===("keystore") settings.SSLKeyStorePassword should ===("changeme") diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/TransientSerializationErrorSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/TransientSerializationErrorSpec.scala index 2c44430ea6..0ea9e59160 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/TransientSerializationErrorSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/TransientSerializationErrorSpec.scala @@ -52,7 +52,7 @@ abstract class AbstractTransientSerializationErrorSpec(config: Config) extends AkkaSpec( config.withFallback( ConfigFactory.parseString(""" - akka { + pekko { loglevel = info actor { provider = remote @@ -116,8 +116,8 @@ abstract class AbstractTransientSerializationErrorSpec(config: Config) class TransientSerializationErrorSpec extends AbstractTransientSerializationErrorSpec(ConfigFactory.parseString(""" - akka.remote.artery.enabled = false - akka.remote.classic.netty.tcp { + pekko.remote.artery.enabled = false + pekko.remote.classic.netty.tcp { hostname = localhost port = 0 } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/TypedActorRemoteDeploySpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/TypedActorRemoteDeploySpec.scala index bcff295e97..3bf2dcd792 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/TypedActorRemoteDeploySpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/TypedActorRemoteDeploySpec.scala @@ -17,11 +17,11 @@ import pekko.testkit.AkkaSpec object TypedActorRemoteDeploySpec { val conf = 
ConfigFactory.parseString(""" - akka.actor.provider = remote - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.use-unsafe-remote-features-outside-cluster = on - akka.actor.allow-java-serialization = on + pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.use-unsafe-remote-features-outside-cluster = on + pekko.actor.allow-java-serialization = on """) trait RemoteNameService { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ActorRefResolveCacheQuarantineSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ActorRefResolveCacheQuarantineSpec.scala index e024ea3a6e..f5021e82a4 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ActorRefResolveCacheQuarantineSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ActorRefResolveCacheQuarantineSpec.scala @@ -19,7 +19,7 @@ import pekko.util.Timeout */ class ActorRefResolveCacheQuarantineSpec extends ArteryMultiNodeSpec(""" - akka.remote.artery.advanced.remove-quarantined-association-after = 2 seconds + pekko.remote.artery.advanced.remove-quarantined-association-after = 2 seconds """) with ImplicitSender { import RemoteFailureSpec._ @@ -49,7 +49,7 @@ class ActorRefResolveCacheQuarantineSpec val clientSystem2 = newRemoteSystem( name = Some(clientSystem1.name), - extraConfig = Some(s"akka.remote.artery.canonical.port = $port1")) + extraConfig = Some(s"pekko.remote.artery.canonical.port = $port1")) val remoteSelection2 = clientSystem2.actorSelection(rootActorPath(system) / "user" / "echo") val reply2 = (remoteSelection2 ? 
"hello-2").futureValue diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArteryMultiNodeSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArteryMultiNodeSpec.scala index a2e4ae613c..9ec76857d0 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArteryMultiNodeSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArteryMultiNodeSpec.scala @@ -79,14 +79,14 @@ abstract class ArteryMultiNodeSpec(config: Config) } def arteryTcpTlsEnabled(system: ActorSystem = system): Boolean = { - val arterySettings = ArterySettings(system.settings.config.getConfig("akka.remote.artery")) + val arterySettings = ArterySettings(system.settings.config.getConfig("pekko.remote.artery")) arterySettings.Enabled && arterySettings.Transport == ArterySettings.TlsTcp } } object ArteryMultiNodeSpec { def arteryUdpEnabled(systemConfig: Config): Boolean = { - val arterySettings = ArterySettings(systemConfig.getConfig("akka.remote.artery")) + val arterySettings = ArterySettings(systemConfig.getConfig("pekko.remote.artery")) arterySettings.Transport == ArterySettings.AeronUpd } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArterySpecSupport.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArterySpecSupport.scala index 201908a3b6..a606eafa89 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArterySpecSupport.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/ArterySpecSupport.scala @@ -9,7 +9,7 @@ import com.typesafe.config.{ Config, ConfigFactory } object ArterySpecSupport { // same for all artery enabled remoting tests private val staticArteryRemotingConfig = ConfigFactory.parseString(s""" - akka { + pekko { actor { provider = remote } @@ -40,7 +40,7 @@ object ArterySpecSupport { val keyStore = getClass.getClassLoader.getResource("keystore").getPath ConfigFactory.parseString(s""" - akka.remote.artery.ssl.config-ssl-engine { + 
pekko.remote.artery.ssl.config-ssl-engine { key-store = "$keyStore" trust-store = "$trustStore" } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/BindCanonicalAddressSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/BindCanonicalAddressSpec.scala index ddc6d353a2..7deefe8dee 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/BindCanonicalAddressSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/BindCanonicalAddressSpec.scala @@ -26,7 +26,7 @@ trait BindCanonicalAddressBehaviors { "bind to a random port" in { val config = ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.port = 0 """) implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) @@ -39,8 +39,8 @@ trait BindCanonicalAddressBehaviors { val address = SocketUtil.temporaryServerAddress(InetAddress.getLocalHost.getHostAddress, udp = isUDP) val config = ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = ${address.getPort} - akka.remote.artery.bind.port = 0 + pekko.remote.artery.canonical.port = ${address.getPort} + pekko.remote.artery.bind.port = 0 """) implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) @@ -62,9 +62,9 @@ trait BindCanonicalAddressBehaviors { "bind to a specified bind hostname and remoting aspects from canonical hostname" in { val config = ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = 0 - akka.remote.artery.canonical.hostname = "127.0.0.1" - akka.remote.artery.bind.hostname = "localhost" + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.canonical.hostname = "127.0.0.1" + pekko.remote.artery.bind.hostname = "localhost" """) implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) @@ -77,8 +77,8 @@ trait BindCanonicalAddressBehaviors { val address = SocketUtil.temporaryServerAddress(InetAddress.getLocalHost.getHostAddress, udp = isUDP) val config = 
ConfigFactory.parseString(s""" - akka.remote.artery.canonical.port = 0 - akka.remote.artery.bind.port = ${address.getPort} + pekko.remote.artery.canonical.port = 0 + pekko.remote.artery.bind.port = ${address.getPort} """) implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) @@ -89,7 +89,7 @@ trait BindCanonicalAddressBehaviors { "bind to all interfaces" in { val config = ConfigFactory.parseString(s""" - akka.remote.artery.bind.hostname = "0.0.0.0" + pekko.remote.artery.bind.hostname = "0.0.0.0" """) implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) @@ -116,7 +116,7 @@ class BindCanonicalAddressSpec extends AnyWordSpec with Matchers with BindCanoni object BindCanonicalAddressSpec { def commonConfig(transport: String) = ConfigFactory.parseString(s""" - akka { + pekko { actor.provider = remote remote.artery.enabled = true remote.artery.transport = "$transport" diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateFlushSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateFlushSpec.scala index dcbc1f9563..a8f3232fc2 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateFlushSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateFlushSpec.scala @@ -20,7 +20,7 @@ import pekko.testkit.ImplicitSender import pekko.util.OptionVal class DuplicateFlushSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { private val pool = new EnvelopeBufferPool(1034 * 1024, 128) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateHandshakeSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateHandshakeSpec.scala index 92528c9323..85f9717a91 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateHandshakeSpec.scala +++ 
b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/DuplicateHandshakeSpec.scala @@ -21,7 +21,7 @@ import pekko.testkit.ImplicitSender import pekko.util.OptionVal class DuplicateHandshakeSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { val pool = new EnvelopeBufferPool(1034 * 1024, 128) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeDenySpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeDenySpec.scala index 379ee4e14c..189c9ea51c 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeDenySpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeDenySpec.scala @@ -16,9 +16,9 @@ import pekko.testkit._ object HandshakeDenySpec { val commonConfig = ConfigFactory.parseString(s""" - akka.loglevel = WARNING - akka.remote.artery.advanced.handshake-timeout = 2s - akka.remote.artery.advanced.aeron.image-liveness-timeout = 1.9s + pekko.loglevel = WARNING + pekko.remote.artery.advanced.handshake-timeout = 2s + pekko.remote.artery.advanced.aeron.image-liveness-timeout = 1.9s """).withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeFailureSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeFailureSpec.scala index 80419d93dd..c566f1f380 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeFailureSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeFailureSpec.scala @@ -17,8 +17,8 @@ import pekko.testkit.TestProbe object HandshakeFailureSpec { val commonConfig = ConfigFactory.parseString(s""" - akka.remote.artery.advanced.handshake-timeout = 2s - akka.remote.artery.advanced.aeron.image-liveness-timeout = 1.9s + pekko.remote.artery.advanced.handshake-timeout = 2s + 
pekko.remote.artery.advanced.aeron.image-liveness-timeout = 1.9s """).withFallback(ArterySpecSupport.defaultConfig) } @@ -35,7 +35,7 @@ class HandshakeFailureSpec extends ArteryMultiNodeSpec(HandshakeFailureSpec.comm expectNoMessage(3.seconds) // longer than handshake-timeout val systemB = - newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"pekko.remote.artery.canonical.port = $portB")) systemB.actorOf(TestActors.echoActorProps, "echo") within(10.seconds) { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeRetrySpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeRetrySpec.scala index 97ecafb64b..f29a152927 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeRetrySpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/HandshakeRetrySpec.scala @@ -15,8 +15,8 @@ import pekko.testkit.TestActors object HandshakeRetrySpec { val commonConfig = ConfigFactory.parseString(s""" - akka.remote.artery.advanced.handshake-timeout = 10s - akka.remote.artery.advanced.aeron.image-liveness-timeout = 7s + pekko.remote.artery.advanced.handshake-timeout = 10s + pekko.remote.artery.advanced.aeron.image-liveness-timeout = 7s """).withFallback(ArterySpecSupport.defaultConfig) } @@ -33,7 +33,7 @@ class HandshakeRetrySpec extends ArteryMultiNodeSpec(HandshakeRetrySpec.commonCo expectNoMessage(1.second) val systemB = - newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"pekko.remote.artery.canonical.port = $portB")) systemB.actorOf(TestActors.echoActorProps, "echo") expectMsg("hello") diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundControlJunctionSpec.scala 
b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundControlJunctionSpec.scala index 5e7e560e17..b34ac2ef1b 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundControlJunctionSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundControlJunctionSpec.scala @@ -29,10 +29,10 @@ object InboundControlJunctionSpec { class InboundControlJunctionSpec extends AkkaSpec(""" - akka.actor.serialization-bindings { + pekko.actor.serialization-bindings { "org.apache.pekko.remote.artery.InboundControlJunctionSpec$TestControlMessage" = java } - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { import InboundControlJunctionSpec._ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundHandshakeSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundHandshakeSpec.scala index 05f26b65ae..d33c18216b 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundHandshakeSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/InboundHandshakeSpec.scala @@ -31,7 +31,7 @@ object InboundHandshakeSpec { } class InboundHandshakeSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LargeMessagesStreamSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LargeMessagesStreamSpec.scala index 0f85ba2bc2..f9cdb37663 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LargeMessagesStreamSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LargeMessagesStreamSpec.scala @@ -28,7 +28,7 @@ object LargeMessagesStreamSpec { class LargeMessagesStreamSpec extends ArteryMultiNodeSpec( """ - 
akka { + pekko { remote.artery.large-message-destinations = [ "/user/large1", "/user/large2", "/user/large3" , "/user/largeWildcard*" ] } """.stripMargin) { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LateConnectSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LateConnectSpec.scala index 1ad22e8181..8922d07502 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LateConnectSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/LateConnectSpec.scala @@ -18,8 +18,8 @@ import pekko.testkit.TestProbe object LateConnectSpec { val config = ConfigFactory.parseString(s""" - akka.remote.artery.advanced.handshake-timeout = 3s - akka.remote.artery.advanced.aeron.image-liveness-timeout = 2.9s + pekko.remote.artery.advanced.handshake-timeout = 3s + pekko.remote.artery.advanced.aeron.image-liveness-timeout = 2.9s """).withFallback(ArterySpecSupport.defaultConfig) } @@ -28,7 +28,7 @@ class LateConnectSpec extends ArteryMultiNodeSpec(LateConnectSpec.config) with I val portB = freePort() lazy val systemB = - newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"pekko.remote.artery.canonical.port = $portB")) "Connection" must { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/MetadataCarryingSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/MetadataCarryingSpec.scala index 852a02660a..7061933e8a 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/MetadataCarryingSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/MetadataCarryingSpec.scala @@ -103,7 +103,7 @@ object MetadataCarryingSpec { } class MetadataCarryingSpec extends ArteryMultiNodeSpec(""" - akka { + pekko { remote.artery.advanced { instruments = [ "org.apache.pekko.remote.artery.TestInstrument" ] } diff --git 
a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundControlJunctionSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundControlJunctionSpec.scala index 09d3dfa5c8..2444ce3e53 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundControlJunctionSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundControlJunctionSpec.scala @@ -21,7 +21,7 @@ object OutboundControlJunctionSpec { } class OutboundControlJunctionSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { import OutboundControlJunctionSpec._ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundHandshakeSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundHandshakeSpec.scala index 3f4e630be2..b6b2013a9f 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundHandshakeSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundHandshakeSpec.scala @@ -21,7 +21,7 @@ import pekko.testkit.ImplicitSender import pekko.util.OptionVal class OutboundHandshakeSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundIdleShutdownSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundIdleShutdownSpec.scala index 94744f406b..fa9e602958 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundIdleShutdownSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/OutboundIdleShutdownSpec.scala @@ -23,11 +23,11 @@ import pekko.testkit.TestActors import pekko.testkit.TestProbe class OutboundIdleShutdownSpec extends 
ArteryMultiNodeSpec(s""" - akka.loglevel=INFO - akka.remote.artery.advanced.stop-idle-outbound-after = 1 s - akka.remote.artery.advanced.connection-timeout = 2 s - akka.remote.artery.advanced.remove-quarantined-association-after = 1 s - akka.remote.artery.advanced.compression { + pekko.loglevel=INFO + pekko.remote.artery.advanced.stop-idle-outbound-after = 1 s + pekko.remote.artery.advanced.connection-timeout = 2 s + pekko.remote.artery.advanced.remove-quarantined-association-after = 1 s + pekko.remote.artery.advanced.compression { actor-refs.advertisement-interval = 5 seconds } """) with ImplicitSender with Eventually { @@ -149,8 +149,8 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s""" val remoteSystem2 = newRemoteSystem( Some(s""" - akka.remote.artery.canonical.hostname = ${remoteAddress.host.get} - akka.remote.artery.canonical.port = ${remoteAddress.port.get} + pekko.remote.artery.canonical.hostname = ${remoteAddress.host.get} + pekko.remote.artery.canonical.port = ${remoteAddress.port.get} """), name = Some(remoteAddress.system)) try { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteActorSelectionSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteActorSelectionSpec.scala index e88622b821..1d8e79a20a 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteActorSelectionSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteActorSelectionSpec.scala @@ -57,7 +57,7 @@ class RemoteActorSelectionSpec extends ArteryMultiNodeSpec with ImplicitSender { def config(port: Int) = s""" - akka { + pekko { remote.artery.port = $port actor.deployment { /looker2/child.remote = "akka://$remoteSysName@localhost:$remotePort" diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteConnectionSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteConnectionSpec.scala index 396119182c..d70e3da649 100644 --- 
a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteConnectionSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteConnectionSpec.scala @@ -10,7 +10,7 @@ import org.apache.pekko import pekko.actor.ActorSystem import pekko.testkit.{ EventFilter, ImplicitSender, TestActors, TestEvent, TestProbe } -class RemoteConnectionSpec extends ArteryMultiNodeSpec("akka.remote.retry-gate-closed-for = 5s") with ImplicitSender { +class RemoteConnectionSpec extends ArteryMultiNodeSpec("pekko.remote.retry-gate-closed-for = 5s") with ImplicitSender { def muteSystem(system: ActorSystem): Unit = { system.eventStream.publish( @@ -34,7 +34,7 @@ class RemoteConnectionSpec extends ArteryMultiNodeSpec("akka.remote.retry-gate-c localProbe.expectNoMessage(1.seconds) // then start the remote system and try again - val remoteSystem = newRemoteSystem(extraConfig = Some(s"akka.remote.artery.canonical.port=$remotePort")) + val remoteSystem = newRemoteSystem(extraConfig = Some(s"pekko.remote.artery.canonical.port=$remotePort")) muteSystem(remoteSystem) localProbe.expectNoMessage(2.seconds) @@ -65,7 +65,7 @@ class RemoteConnectionSpec extends ArteryMultiNodeSpec("akka.remote.retry-gate-c localProbe.expectNoMessage(1.seconds) // then when it is up, talk from other system - val remoteSystem = newRemoteSystem(extraConfig = Some(s"akka.remote.artery.canonical.port=$remotePort")) + val remoteSystem = newRemoteSystem(extraConfig = Some(s"pekko.remote.artery.canonical.port=$remotePort")) muteSystem(remoteSystem) localProbe.expectNoMessage(2.seconds) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeathWatchSpec.scala index c8d89e062c..a01bd07fbe 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeathWatchSpec.scala @@ -20,7 +20,7 @@ 
object RemoteDeathWatchSpec { val otherPort = ArteryMultiNodeSpec.freePort(ConfigFactory.load()) val config = ConfigFactory.parseString(s""" - akka { + pekko { actor { provider = remote deployment { @@ -39,8 +39,8 @@ object RemoteDeathWatchSpec { } } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """).withFallback(ArterySpecSupport.defaultConfig) } @@ -53,7 +53,8 @@ class RemoteDeathWatchSpec system.eventStream.publish(TestEvent.Mute(EventFilter[io.aeron.exceptions.RegistrationException]())) - val other = newRemoteSystem(name = Some("other"), extraConfig = Some(s"akka.remote.artery.canonical.port=$otherPort")) + val other = + newRemoteSystem(name = Some("other"), extraConfig = Some(s"pekko.remote.artery.canonical.port=$otherPort")) override def expectedTestDuration: FiniteDuration = 120.seconds diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeployerSpec.scala index 84bcfe1955..0cf5320212 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeployerSpec.scala @@ -15,7 +15,7 @@ import pekko.testkit._ object RemoteDeployerSpec { val deployerConf = ConfigFactory.parseString(""" - akka.actor.deployment { + pekko.actor.deployment { /service2 { router = round-robin-pool nr-of-instances = 3 @@ -35,7 +35,7 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) { "A RemoteDeployer" must { - "be able to parse 'akka.actor.deployment._' with specified remote nodes" in { + "be able to parse 'pekko.actor.deployment._' with specified remote nodes" in { val service = "/service2" val deployment = 
system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeploymentSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeploymentSpec.scala index a5c4e3d2bf..3cb3a1c262 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeploymentSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteDeploymentSpec.scala @@ -17,7 +17,7 @@ object RemoteDeploymentSpec { def receive = { case "throwInvalidActorNameException" => - // InvalidActorNameException is supported by akka-misc + // InvalidActorNameException is supported by pekko-misc throw InvalidActorNameException("wrong name") case "throwException" => // no specific serialization binding for Exception @@ -71,9 +71,9 @@ object RemoteDeploymentSpec { class RemoteDeploymentSpec extends ArteryMultiNodeSpec(ConfigFactory.parseString(""" - akka.remote.artery.advanced.inbound-lanes = 10 - akka.remote.artery.advanced.outbound-lanes = 3 - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.artery.advanced.inbound-lanes = 10 + pekko.remote.artery.advanced.outbound-lanes = 3 + pekko.remote.use-unsafe-remote-features-outside-cluster = on """).withFallback(ArterySpecSupport.defaultConfig)) { import RemoteDeploymentSpec._ @@ -81,13 +81,13 @@ class RemoteDeploymentSpec val port = RARP(system).provider.getDefaultAddress.port.get val conf = s""" - akka.actor.deployment { + pekko.actor.deployment { /blub.remote = "akka://${system.name}@localhost:$port" /blub2.remote = "akka://${system.name}@localhost:$port" "/parent*/*".remote = "akka://${system.name}@localhost:$port" } - akka.remote.artery.advanced.inbound-lanes = 10 - akka.remote.artery.advanced.outbound-lanes = 3 + pekko.remote.artery.advanced.inbound-lanes = 10 + pekko.remote.artery.advanced.outbound-lanes = 3 """ val masterSystem = newRemoteSystem(name = Some("Master" + system.name), 
extraConfig = Some(conf)) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteInstrumentsSerializationSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteInstrumentsSerializationSpec.scala index b689c44f8b..69aef2d7ab 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteInstrumentsSerializationSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteInstrumentsSerializationSpec.scala @@ -16,7 +16,7 @@ import pekko.util.{ unused, OptionVal } import java.nio.ByteOrder -class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG") { +class RemoteInstrumentsSerializationSpec extends AkkaSpec("pekko.loglevel = DEBUG") { import RemoteInstrumentsSerializationSpec._ def remoteInstruments(instruments: RemoteInstrument*): RemoteInstruments = { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteRouterSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteRouterSpec.scala index 636bd18581..73ec7c9140 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteRouterSpec.scala @@ -27,8 +27,8 @@ object RemoteRouterSpec { class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" - akka.remote.use-unsafe-remote-features-outside-cluster = on - akka.actor.deployment { + pekko.remote.use-unsafe-remote-features-outside-cluster = on + pekko.actor.deployment { /remote-override { router = round-robin-pool nr-of-instances = 4 @@ -48,7 +48,7 @@ class RemoteRouterSpec val port = RARP(system).provider.getDefaultAddress.port.get val sysName = system.name val conf = ConfigFactory.parseString(s""" - akka { + pekko { actor.deployment { /blub { router = round-robin-pool diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteSendConsistencySpec.scala 
b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteSendConsistencySpec.scala index aba08d77e8..dee9b4bf98 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteSendConsistencySpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteSendConsistencySpec.scala @@ -25,45 +25,45 @@ import pekko.testkit.{ ImplicitSender, TestActors, TestProbe } class ArteryUpdSendConsistencyWithOneLaneSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" - akka.remote.artery.transport = aeron-udp - akka.remote.artery.advanced.outbound-lanes = 1 - akka.remote.artery.advanced.inbound-lanes = 1 + pekko.remote.artery.transport = aeron-udp + pekko.remote.artery.advanced.outbound-lanes = 1 + pekko.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryUpdSendConsistencyWithThreeLanesSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.remote.artery.transport = aeron-udp - akka.remote.artery.advanced.outbound-lanes = 3 - akka.remote.artery.advanced.inbound-lanes = 3 + pekko.loglevel = DEBUG + pekko.remote.artery.transport = aeron-udp + pekko.remote.artery.advanced.outbound-lanes = 3 + pekko.remote.artery.advanced.inbound-lanes = 3 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTcpSendConsistencyWithOneLaneSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" - akka.remote.artery.transport = tcp - akka.remote.artery.advanced.outbound-lanes = 1 - akka.remote.artery.advanced.inbound-lanes = 1 + pekko.remote.artery.transport = tcp + pekko.remote.artery.advanced.outbound-lanes = 1 + pekko.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTcpSendConsistencyWithThreeLanesSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" - akka.remote.artery.transport = tcp - 
akka.remote.artery.advanced.outbound-lanes = 3 - akka.remote.artery.advanced.inbound-lanes = 3 + pekko.remote.artery.transport = tcp + pekko.remote.artery.advanced.outbound-lanes = 3 + pekko.remote.artery.advanced.inbound-lanes = 3 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTlsTcpSendConsistencyWithOneLaneSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" - akka.remote.artery.transport = tls-tcp - akka.remote.artery.advanced.outbound-lanes = 1 - akka.remote.artery.advanced.inbound-lanes = 1 + pekko.remote.artery.transport = tls-tcp + pekko.remote.artery.advanced.outbound-lanes = 1 + pekko.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTlsTcpSendConsistencyWithThreeLanesSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" - akka.remote.artery.transport = tls-tcp - akka.remote.artery.advanced.outbound-lanes = 1 - akka.remote.artery.advanced.inbound-lanes = 1 + pekko.remote.artery.transport = tls-tcp + pekko.remote.artery.advanced.outbound-lanes = 1 + pekko.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) abstract class AbstractRemoteSendConsistencySpec(config: Config) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteWatcherSpec.scala index c8ad2bca72..791be57d5d 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteWatcherSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/RemoteWatcherSpec.scala @@ -72,7 +72,7 @@ object RemoteWatcherSpec { class RemoteWatcherSpec extends ArteryMultiNodeSpec( ConfigFactory - .parseString("akka.remote.use-unsafe-remote-features-outside-cluster = on") + .parseString("pekko.remote.use-unsafe-remote-features-outside-cluster = on") .withFallback(ArterySpecSupport.defaultConfig)) with ImplicitSender { diff --git 
a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SendQueueSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SendQueueSpec.scala index 900c12cda2..da0556a0c1 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SendQueueSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SendQueueSpec.scala @@ -51,8 +51,8 @@ object SendQueueSpec { } class SendQueueSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on - akka.stream.secret-test-fuzzing-warning-disable = yep + pekko.stream.materializer.debug.fuzzing-mode = on + pekko.stream.secret-test-fuzzing-warning-disable = yep """) with ImplicitSender { import SendQueueSpec._ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SerializationErrorSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SerializationErrorSpec.scala index 449b9794a8..5239386147 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SerializationErrorSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SerializationErrorSpec.scala @@ -22,7 +22,7 @@ class SerializationErrorSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defau val systemB = newRemoteSystem( name = Some("systemB"), extraConfig = Some(""" - akka.actor.serialization-identifiers { + pekko.actor.serialization-identifiers { # this will cause deserialization error "org.apache.pekko.serialization.ByteArraySerializer" = -4 } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageAckerSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageAckerSpec.scala index b43d5c3692..33f25c46aa 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageAckerSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageAckerSpec.scala @@ -19,7 +19,7 @@ import pekko.testkit.TestProbe import pekko.util.OptionVal class 
SystemMessageAckerSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageDeliverySpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageDeliverySpec.scala index 981ad5bfdf..b93a637163 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageDeliverySpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/SystemMessageDeliverySpec.scala @@ -39,17 +39,17 @@ object SystemMessageDeliverySpec { case class TestSysMsg(s: String) extends SystemMessageDelivery.AckedDeliveryMessage val safe = ConfigFactory.parseString(s""" - akka.loglevel = INFO - akka.remote.artery.advanced.stop-idle-outbound-after = 1000 ms - akka.remote.artery.advanced.inject-handshake-interval = 500 ms - akka.remote.watch-failure-detector.heartbeat-interval = 2 s - akka.remote.artery.log-received-messages = on - akka.remote.artery.log-sent-messages = on - akka.stream.materializer.debug.fuzzing-mode = on + pekko.loglevel = INFO + pekko.remote.artery.advanced.stop-idle-outbound-after = 1000 ms + pekko.remote.artery.advanced.inject-handshake-interval = 500 ms + pekko.remote.watch-failure-detector.heartbeat-interval = 2 s + pekko.remote.artery.log-received-messages = on + pekko.remote.artery.log-sent-messages = on + pekko.stream.materializer.debug.fuzzing-mode = on """).withFallback(ArterySpecSupport.defaultConfig) val config = - ConfigFactory.parseString("akka.remote.use-unsafe-remote-features-outside-cluster = on").withFallback(safe) + ConfigFactory.parseString("pekko.remote.use-unsafe-remote-features-outside-cluster = on").withFallback(safe) } abstract class AbstractSystemMessageDeliverySpec(c: Config) extends ArteryMultiNodeSpec(c) with ImplicitSender { diff --git 
a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/TestContext.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/TestContext.scala index e23c3ed857..4fc13a2ebd 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/TestContext.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/TestContext.scala @@ -64,7 +64,7 @@ private[remote] class TestInboundContext( new TestOutboundContext(localAddress, remoteAddress, controlSubject, controlProbe) override lazy val settings: ArterySettings = - ArterySettings(ConfigFactory.load().getConfig("akka.remote.artery")) + ArterySettings(ConfigFactory.load().getConfig("pekko.remote.artery")) override def publishDropped(env: InboundEnvelope, reason: String): Unit = () } @@ -106,7 +106,7 @@ private[remote] class TestOutboundContext( } override lazy val settings: ArterySettings = - ArterySettings(ConfigFactory.load().getConfig("akka.remote.artery")) + ArterySettings(ConfigFactory.load().getConfig("pekko.remote.artery")) } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/UntrustedSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/UntrustedSpec.scala index 744ab31290..ae91b8572f 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/UntrustedSpec.scala @@ -60,9 +60,9 @@ object UntrustedSpec { } val config = ConfigFactory.parseString(""" - akka.remote.artery.untrusted-mode = on - akka.remote.artery.trusted-selection-paths = ["/user/receptionist", ] - akka.loglevel = DEBUG # test verifies debug + pekko.remote.artery.untrusted-mode = on + pekko.remote.artery.trusted-selection-paths = ["/user/receptionist", ] + pekko.loglevel = DEBUG # test verifies debug """).withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/aeron/AeronSinkSpec.scala 
b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/aeron/AeronSinkSpec.scala index c3f198b935..d0a2d6ea69 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/aeron/AeronSinkSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/aeron/AeronSinkSpec.scala @@ -25,7 +25,7 @@ import pekko.testkit.ImplicitSender import pekko.testkit.SocketUtil class AeronSinkSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { val driver = MediaDriver.launchEmbedded() diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/CompressionIntegrationSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/CompressionIntegrationSpec.scala index 8815b9188c..be2a42cb50 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/CompressionIntegrationSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/CompressionIntegrationSpec.scala @@ -19,7 +19,7 @@ import java.io.NotSerializableException object CompressionIntegrationSpec { val commonConfig = ConfigFactory.parseString(s""" - akka { + pekko { loglevel = INFO actor { @@ -137,8 +137,8 @@ class CompressionIntegrationSpec "not be advertised if ActorRef compression disabled" in { val config = """ - akka.remote.artery.advanced.compression.actor-refs.max = off - akka.remote.artery.advanced.compression { + pekko.remote.artery.advanced.compression.actor-refs.max = off + pekko.remote.artery.advanced.compression { actor-refs.advertisement-interval = 50 ms manifests.advertisement-interval = 50 ms } @@ -169,8 +169,8 @@ class CompressionIntegrationSpec "not be advertised if manifest compression disabled" in { val config = """ - akka.remote.artery.advanced.compression.manifests.max = off - akka.remote.artery.advanced.compression { + pekko.remote.artery.advanced.compression.manifests.max = off + 
pekko.remote.artery.advanced.compression { actor-refs.advertisement-interval = 50 ms manifests.advertisement-interval = 50 ms } @@ -261,7 +261,7 @@ class CompressionIntegrationSpec val port = address(systemB).port.get shutdown(systemB) val systemB2 = - newRemoteSystem(extraConfig = Some(s"akka.remote.artery.canonical.port=$port"), name = Some("systemB")) + newRemoteSystem(extraConfig = Some(s"pekko.remote.artery.canonical.port=$port"), name = Some("systemB")) // listen for compression table events val aManifestProbe = TestProbe()(system) @@ -332,7 +332,7 @@ class CompressionIntegrationSpec "wrap around" in { val extraConfig = """ - akka.remote.artery.advanced.compression { + pekko.remote.artery.advanced.compression { actor-refs.advertisement-interval = 100 millis manifests.advertisement-interval = 10 minutes } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala index 8b1da964ae..454b64f705 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala @@ -21,7 +21,7 @@ import pekko.util.Timeout object HandshakeShouldDropCompressionTableSpec { val commonConfig = ConfigFactory.parseString(s""" - akka { + pekko { remote.artery.advanced.handshake-timeout = 10s remote.artery.advanced.aeron.image-liveness-timeout = 7s @@ -46,7 +46,8 @@ class HandshakeShouldDropCompressionTableSpec val portB = freePort() before { - systemB = newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + systemB = + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"pekko.remote.artery.canonical.port = $portB")) } "Outgoing compression table" must { @@ -93,7 +94,7 @@ 
class HandshakeShouldDropCompressionTableSpec log.info("SHUTTING DOWN system {}...", systemB) shutdown(systemB) systemB = - newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"pekko.remote.artery.canonical.port = $portB")) Thread.sleep(1000) log.info("SYSTEM READY {}...", systemB) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TcpFramingSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TcpFramingSpec.scala index 2b407c04e6..fba2369690 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TcpFramingSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TcpFramingSpec.scala @@ -17,7 +17,7 @@ import pekko.testkit.ImplicitSender import pekko.util.ByteString class TcpFramingSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = on + pekko.stream.materializer.debug.fuzzing-mode = on """) with ImplicitSender { import TcpFraming.encodeFrameHeader diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TlsTcpSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TlsTcpSpec.scala index 902346151d..7cb86ce254 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TlsTcpSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/TlsTcpSpec.scala @@ -28,7 +28,7 @@ class TlsTcpWithDefaultConfigSpec extends TlsTcpSpec(ConfigFactory.empty()) class TlsTcpWithSHA1PRNGSpec extends TlsTcpSpec(ConfigFactory.parseString(""" - akka.remote.artery.ssl.config-ssl-engine { + pekko.remote.artery.ssl.config-ssl-engine { random-number-generator = "SHA1PRNG" enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"] } @@ -36,7 +36,7 @@ class TlsTcpWithSHA1PRNGSpec class TlsTcpWithDefaultRNGSecureSpec extends TlsTcpSpec(ConfigFactory.parseString(""" - 
akka.remote.artery.ssl.config-ssl-engine { + pekko.remote.artery.ssl.config-ssl-engine { random-number-generator = "" enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"] } @@ -44,14 +44,14 @@ class TlsTcpWithDefaultRNGSecureSpec class TlsTcpWithCrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec extends TlsTcpSpec(ConfigFactory.parseString(""" - akka.remote.artery.ssl.config-ssl-engine { + pekko.remote.artery.ssl.config-ssl-engine { random-number-generator = "" enabled-algorithms = [""SSL_RSA_WITH_NULL_MD5""] } """)) class TlsTcpWithRotatingKeysSSLEngineSpec extends TlsTcpSpec(ConfigFactory.parseString(s""" - akka.remote.artery.ssl { + pekko.remote.artery.ssl { ssl-engine-provider = org.apache.pekko.remote.artery.tcp.ssl.RotatingKeysSSLEngineProvider rotating-keys-engine { key-file = ${TlsTcpSpec.resourcePath("ssl/node.example.com.pem")} @@ -66,7 +66,7 @@ object TlsTcpSpec { lazy val config: Config = { ConfigFactory.parseString(s""" - akka.remote.artery { + pekko.remote.artery { transport = tls-tcp large-message-destinations = [ "/user/large" ] } @@ -85,11 +85,11 @@ abstract class TlsTcpSpec(config: Config) val rootB = RootActorPath(addressB) def isSupported: Boolean = { - val checked = system.settings.config.getString("akka.remote.artery.ssl.ssl-engine-provider") match { + val checked = system.settings.config.getString("pekko.remote.artery.ssl.ssl-engine-provider") match { case "org.apache.pekko.remote.artery.tcp.ConfigSSLEngineProvider" => - CipherSuiteSupportCheck.isSupported(system, "akka.remote.artery.ssl.config-ssl-engine") + CipherSuiteSupportCheck.isSupported(system, "pekko.remote.artery.ssl.config-ssl-engine") case "org.apache.pekko.remote.artery.tcp.ssl.RotatingKeysSSLEngineProvider" => - CipherSuiteSupportCheck.isSupported(system, "akka.remote.artery.ssl.rotating-keys-engine") + CipherSuiteSupportCheck.isSupported(system, "pekko.remote.artery.ssl.rotating-keys-engine") case other => fail( s"Don't know how to determine whether the crypto building 
blocks in [$other] are available on this platform") @@ -148,12 +148,12 @@ abstract class TlsTcpSpec(config: Config) class TlsTcpWithHostnameVerificationSpec extends ArteryMultiNodeSpec(ConfigFactory.parseString(""" - akka.remote.artery.ssl.config-ssl-engine { + pekko.remote.artery.ssl.config-ssl-engine { hostname-verification = on } - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.remote.use-unsafe-remote-features-outside-cluster = on - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """).withFallback(TlsTcpSpec.config)) with ImplicitSender { @@ -166,7 +166,7 @@ class TlsTcpWithHostnameVerificationSpec val systemB = newRemoteSystem( // The subjectAltName is 'localhost', so connecting to '127.0.0.1' should not // work when using hostname verification: - extraConfig = Some("""akka.remote.artery.canonical.hostname = "127.0.0.1""""), + extraConfig = Some("""pekko.remote.artery.canonical.hostname = "127.0.0.1""""), name = Some("systemB")) val addressB = address(systemB) @@ -195,10 +195,10 @@ class TlsTcpWithHostnameVerificationSpec val systemB = newRemoteSystem( extraConfig = Some(""" // The subjectAltName is 'localhost', so this is how we want to be known: - akka.remote.artery.canonical.hostname = "localhost" + pekko.remote.artery.canonical.hostname = "localhost" // Though we will still bind to 127.0.0.1 (make sure it's not ipv6) - akka.remote.artery.bind.hostname = "127.0.0.1" + pekko.remote.artery.bind.hostname = "127.0.0.1" """), name = Some("systemB")) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala index 74287ca208..9635e3cbec 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala +++ 
b/akka-remote/src/test/scala/org/apache/pekko/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala @@ -158,21 +158,21 @@ object RotatingKeysSSLEngineProviderSpec { private val arteryNode001Id = "ssl/artery-nodes/artery-node001.example.com" private val baseConfig = """ - akka.loglevel = debug + pekko.loglevel = debug - akka.remote.artery { + pekko.remote.artery { ## the large-messages channel in artery is not used for this tests ## but we're enabling it to test it also creates its own SSLEngine large-message-destinations = [ "/user/large" ] } - akka.remote.artery.ssl { + pekko.remote.artery.ssl { ssl-engine-provider = org.apache.pekko.remote.artery.tcp.ssl.RotatingKeysSSLEngineProvider } """ val resourcesConfig: String = baseConfig + s""" - akka.remote.artery.ssl.rotating-keys-engine { + pekko.remote.artery.ssl.rotating-keys-engine { key-file = ${getClass.getClassLoader.getResource(s"$arteryNode001Id.pem").getPath} cert-file = ${getClass.getClassLoader.getResource(s"$arteryNode001Id.crt").getPath} ca-cert-file = ${getClass.getClassLoader.getResource("ssl/exampleca.crt").getPath} @@ -186,7 +186,7 @@ object RotatingKeysSSLEngineProviderSpec { val cacertLocation = new File(temporaryDirectory.toFile, "ca.crt") val tempFileConfig: String = baseConfig + s""" - akka.remote.artery.ssl.rotating-keys-engine { + pekko.remote.artery.ssl.rotating-keys-engine { key-file = ${temporaryDirectory.toFile.getAbsolutePath}/tls.key cert-file = ${temporaryDirectory.toFile.getAbsolutePath}/tls.crt ca-cert-file = ${temporaryDirectory.toFile.getAbsolutePath}/ca.crt diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/ActorsLeakSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/ActorsLeakSpec.scala index 7acd2a99d2..7ca9358edd 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/ActorsLeakSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/ActorsLeakSpec.scala @@ -22,19 +22,19 @@ import 
pekko.testkit.TestActors.EchoActor object ActorsLeakSpec { val config = ConfigFactory.parseString(""" - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.provider = remote - akka.remote.artery.enabled = false - akka.remote.classic.netty.tcp.applied-adapters = ["trttl"] - #akka.remote.log-lifecycle-events = on - akka.remote.classic.transport-failure-detector.heartbeat-interval = 1 s - akka.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s - akka.remote.classic.quarantine-after-silence = 3 s - akka.remote.use-unsafe-remote-features-outside-cluster = on - akka.test.filter-leeway = 12 s + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.provider = remote + pekko.remote.artery.enabled = false + pekko.remote.classic.netty.tcp.applied-adapters = ["trttl"] + #pekko.remote.log-lifecycle-events = on + pekko.remote.classic.transport-failure-detector.heartbeat-interval = 1 s + pekko.remote.classic.transport-failure-detector.acceptable-heartbeat-pause = 3 s + pekko.remote.classic.quarantine-after-silence = 3 s + pekko.remote.use-unsafe-remote-features-outside-cluster = on + pekko.test.filter-leeway = 12 s # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) def collectLiveActors(root: Option[ActorRef]): immutable.Seq[ActorRef] = { @@ -92,7 +92,7 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender val remoteSystem = ActorSystem( "remote", - ConfigFactory.parseString("akka.remote.classic.netty.tcp.port = 0").withFallback(config)) + ConfigFactory.parseString("pekko.remote.classic.netty.tcp.port = 0").withFallback(config)) try { val probe = TestProbe()(remoteSystem) @@ -114,8 +114,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with 
ImplicitSender ActorSystem( "remote", ConfigFactory.parseString(""" - akka.remote.artery.enabled = false - akka.remote.classic.netty.tcp.port = 2553 + pekko.remote.artery.enabled = false + pekko.remote.classic.netty.tcp.port = 2553 """.stripMargin).withFallback(config)) try { @@ -158,8 +158,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender ActorSystem( "remote", ConfigFactory.parseString(""" - akka.remote.artery.enabled = off - akka.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.enabled = off + pekko.remote.classic.netty.tcp.port = 0 """.stripMargin).withFallback(config)) val remoteAddress = RARP(remoteSystem).provider.getDefaultAddress @@ -185,7 +185,7 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender // Remote idle for too long case val remoteSystem = - ActorSystem("remote", ConfigFactory.parseString("akka.remote.classic.netty.tcp.port = 0").withFallback(config)) + ActorSystem("remote", ConfigFactory.parseString("pekko.remote.classic.netty.tcp.port = 0").withFallback(config)) val remoteAddress = RARP(remoteSystem).provider.getDefaultAddress remoteSystem.actorOf(Props[StoppableActor](), "stoppable") diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeathWatchSpec.scala index d773cf19cd..121931c624 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeathWatchSpec.scala @@ -18,7 +18,7 @@ import pekko.testkit.{ SocketUtil, _ } @nowarn // classic deprecated class RemoteDeathWatchSpec extends AkkaSpec(ConfigFactory.parseString(""" -akka { +pekko { actor { provider = remote deployment { @@ -38,8 +38,8 @@ akka { } } # test is using Java serialization and not priority to rewrite -akka.actor.allow-java-serialization = on -akka.actor.warn-about-java-serializer-usage 
= off +pekko.actor.allow-java-serialization = on +pekko.actor.warn-about-java-serializer-usage = off """)) with ImplicitSender with DefaultTimeout @@ -52,9 +52,9 @@ akka.actor.warn-about-java-serializer-usage = off val other = ActorSystem( "other", ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.remote.artery.enabled = off - akka.remote.classic.netty.tcp.port=2666 + pekko.loglevel = DEBUG + pekko.remote.artery.enabled = off + pekko.remote.classic.netty.tcp.port=2666 """).withFallback(system.settings.config)) override def beforeTermination(): Unit = { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeploymentAllowListSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeploymentAllowListSpec.scala index 62073446be..e910f4f025 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeploymentAllowListSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteDeploymentAllowListSpec.scala @@ -54,15 +54,15 @@ object RemoteDeploymentAllowListSpec { } val cfg: Config = ConfigFactory.parseString(s""" - akka { + pekko { actor.provider = remote remote { use-unsafe-remote-features-outside-cluster = on classic.enabled-transports = [ - "akka.remote.test", - "akka.remote.classic.netty.tcp" + "pekko.remote.test", + "pekko.remote.classic.netty.tcp" ] classic { @@ -92,8 +92,8 @@ object RemoteDeploymentAllowListSpec { } } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) def muteSystem(system: ActorSystem): Unit = { @@ -115,13 +115,13 @@ class RemoteDeploymentAllowListSpec val conf = ConfigFactory.parseString(""" - akka.remote.test { + pekko.remote.test { local-address = "test://remote-sys@localhost:12346" maximum-payload-bytes = 48000 bytes } //#allow-list-config 
- akka.remote.deployment { + pekko.remote.deployment { enable-allow-list = on allowed-actor-classes = [ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteInitErrorSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteInitErrorSpec.scala index e65ddcdb5a..7d704f4a52 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteInitErrorSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteInitErrorSpec.scala @@ -25,7 +25,7 @@ import pekko.util.ccompat.JavaConverters._ */ class RemoteInitErrorSpec extends AnyWordSpec with Matchers { val conf = ConfigFactory.parseString(""" - akka { + pekko { actor { provider = remote } @@ -49,7 +49,7 @@ class RemoteInitErrorSpec extends AnyWordSpec with Matchers { "shut down properly on RemoteActorRefProvider initialization failure" in { val start = currentThreadIds() try { - ActorSystem("duplicate", ConfigFactory.parseString("akka.loglevel=OFF").withFallback(conf)) + ActorSystem("duplicate", ConfigFactory.parseString("pekko.loglevel=OFF").withFallback(conf)) fail("initialization should fail due to invalid IP address") } catch { case NonFatal(_) => { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteSettingsSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteSettingsSpec.scala index 64fa4634cc..463c2b1ac5 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteSettingsSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteSettingsSpec.scala @@ -15,14 +15,14 @@ import org.apache.pekko.remote.RemoteSettings class RemoteSettingsSpec extends AnyWordSpec with Matchers { "Remote settings" must { - "default akka.remote.classic.log-frame-size-exceeding to off" in { + "default pekko.remote.classic.log-frame-size-exceeding to off" in { new RemoteSettings(ConfigFactory.load()).LogFrameSizeExceeding shouldEqual None } - "parse 
akka.remote.classic.log-frame-size-exceeding value as bytes" in { + "parse pekko.remote.classic.log-frame-size-exceeding value as bytes" in { new RemoteSettings( ConfigFactory - .parseString("akka.remote.classic.log-frame-size-exceeding = 100b") + .parseString("pekko.remote.classic.log-frame-size-exceeding = 100b") .withFallback(ConfigFactory.load())).LogFrameSizeExceeding shouldEqual Some(100) } } diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteWatcherSpec.scala index 745db8232a..bc8f68b878 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteWatcherSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemoteWatcherSpec.scala @@ -71,7 +71,7 @@ object RemoteWatcherSpec { @nowarn("msg=deprecated") class RemoteWatcherSpec extends AkkaSpec(""" - akka { + pekko { loglevel = INFO log-dead-letters-during-shutdown = false actor.provider = remote @@ -83,8 +83,8 @@ class RemoteWatcherSpec extends AkkaSpec(""" remote.use-unsafe-remote-features-outside-cluster = on } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) with ImplicitSender { import RemoteWatcher._ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemotingSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemotingSpec.scala index 04a37c7c7a..01acc892d9 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemotingSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/RemotingSpec.scala @@ -82,7 +82,7 @@ object RemotingSpec { hostname = "localhost" } - akka { + pekko { actor.provider = remote # test is using Java serialization and not priority to rewrite 
actor.allow-java-serialization = on @@ -96,9 +96,9 @@ object RemotingSpec { log-remote-lifecycle-events = on enabled-transports = [ - "akka.remote.classic.test", - "akka.remote.classic.netty.tcp", - "akka.remote.classic.netty.ssl" + "pekko.remote.classic.test", + "pekko.remote.classic.netty.tcp", + "pekko.remote.classic.netty.ssl" ] netty.tcp = $${common-netty-settings} @@ -141,8 +141,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D import RemotingSpec._ val conf = ConfigFactory.parseString(""" - akka.remote.artery.enabled = false - akka.remote.classic.test { + pekko.remote.artery.enabled = false + pekko.remote.classic.test { local-address = "test://remote-sys@localhost:12346" maximum-payload-bytes = 48000 bytes } @@ -204,7 +204,7 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D private def byteStringOfSize(size: Int) = ByteString.fromArray(Array.fill(size)(42: Byte)) - val maxPayloadBytes = system.settings.config.getBytes("akka.remote.classic.test.maximum-payload-bytes").toInt + val maxPayloadBytes = system.settings.config.getBytes("pekko.remote.classic.test.maximum-payload-bytes").toInt override def afterTermination(): Unit = { shutdown(remoteSystem) @@ -241,7 +241,7 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "not be exhausted by sending to broken connections" in { val tcpOnlyConfig = ConfigFactory - .parseString("""akka.remote.enabled-transports = ["akka.remote.classic.netty.tcp"]""") + .parseString("""pekko.remote.enabled-transports = ["pekko.remote.classic.netty.tcp"]""") .withFallback(remoteSystem.settings.config) val moreSystems = Vector.fill(5)(ActorSystem(remoteSystem.name, tcpOnlyConfig)) moreSystems.foreach { sys => @@ -508,8 +508,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val config = ConfigFactory .parseString( """ - akka.remote.classic.enabled-transports = ["akka.remote.classic.test", 
"akka.remote.classic.netty.tcp"] - akka.remote.classic.test.local-address = "test://other-system@localhost:12347" + pekko.remote.classic.enabled-transports = ["pekko.remote.classic.test", "pekko.remote.classic.netty.tcp"] + pekko.remote.classic.test.local-address = "test://other-system@localhost:12347" """) .withFallback(remoteSystem.settings.config) val otherSystem = ActorSystem("other-system", config) @@ -546,10 +546,10 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val remoteAddress = Address("akka.test", "system2", "localhost", 2) val config = ConfigFactory.parseString(s""" - akka.remote.enabled-transports = ["akka.remote.classic.test"] - akka.remote.retry-gate-closed-for = 5s + pekko.remote.enabled-transports = ["pekko.remote.classic.test"] + pekko.remote.retry-gate-closed-for = 5s - akka.remote.classic.test { + pekko.remote.classic.test { registry-key = tFdVxq local-address = "test://${localAddress.system}@${localAddress.host.get}:${localAddress.port.get}" } @@ -604,11 +604,11 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val rawRemoteAddress = remoteAddress.copy(protocol = "test") val config = ConfigFactory.parseString(s""" - akka.remote.classic.enabled-transports = ["akka.remote.classic.test"] - akka.remote.classic.retry-gate-closed-for = 5s - akka.remote.classic.log-remote-lifecycle-events = on + pekko.remote.classic.enabled-transports = ["pekko.remote.classic.test"] + pekko.remote.classic.retry-gate-closed-for = 5s + pekko.remote.classic.log-remote-lifecycle-events = on - akka.remote.classic.test { + pekko.remote.classic.test { registry-key = TRKAzR local-address = "test://${localAddress.system}@${localAddress.host.get}:${localAddress.port.get}" } @@ -686,11 +686,11 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val remoteUID = 16 val config = ConfigFactory.parseString(s""" - akka.remote.classic.enabled-transports = ["akka.remote.classic.test"] 
- akka.remote.classic.retry-gate-closed-for = 5s - akka.remote.classic.log-remote-lifecycle-events = on + pekko.remote.classic.enabled-transports = ["pekko.remote.classic.test"] + pekko.remote.classic.retry-gate-closed-for = 5s + pekko.remote.classic.log-remote-lifecycle-events = on - akka.remote.classic.test { + pekko.remote.classic.test { registry-key = JMeMndLLsw local-address = "test://${localAddress.system}@${localAddress.host.get}:${localAddress.port.get}" } @@ -771,7 +771,7 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D retries: Int = 3): (ActorSystem, ActorSelection) = { val otherAddress = temporaryServerAddress() val otherConfig = ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${otherAddress.getPort} + pekko.remote.classic.netty.tcp.port = ${otherAddress.getPort} """).withFallback(config) val otherSelection = thisSystem.actorSelection(s"akka.tcp://other-system@localhost:${otherAddress.getPort}/user/echo") @@ -789,9 +789,9 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "be able to connect to system even if it's not there at first" in { val config = ConfigFactory.parseString(s""" - akka.remote.classic.enabled-transports = ["akka.remote.classic.netty.tcp"] - akka.remote.classic.netty.tcp.port = 0 - akka.remote.classic.retry-gate-closed-for = 5s + pekko.remote.classic.enabled-transports = ["pekko.remote.classic.netty.tcp"] + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.classic.retry-gate-closed-for = 5s """).withFallback(remoteSystem.settings.config) val thisSystem = ActorSystem("this-system", config) try { @@ -818,9 +818,9 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "allow other system to connect even if it's not there at first" in { val config = ConfigFactory.parseString(s""" - akka.remote.classic.enabled-transports = ["akka.remote.classic.netty.tcp"] - akka.remote.classic.netty.tcp.port = 0 - 
akka.remote.classic.retry-gate-closed-for = 5s + pekko.remote.classic.enabled-transports = ["pekko.remote.classic.netty.tcp"] + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.classic.retry-gate-closed-for = 5s """).withFallback(remoteSystem.settings.config) val thisSystem = ActorSystem("this-system", config) try { @@ -830,7 +830,7 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D thisSystem.actorOf(Props[Echo2](), "echo") val otherAddress = temporaryServerAddress() val otherConfig = ConfigFactory.parseString(s""" - akka.remote.classic.netty.tcp.port = ${otherAddress.getPort} + pekko.remote.classic.netty.tcp.port = ${otherAddress.getPort} """).withFallback(config) val otherSelection = thisSystem.actorSelection(s"akka.tcp://other-system@localhost:${otherAddress.getPort}/user/echo") diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/UntrustedSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/UntrustedSpec.scala index b0708f50c8..83ecd4e5a5 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/UntrustedSpec.scala @@ -64,18 +64,18 @@ object UntrustedSpec { } class UntrustedSpec extends AkkaSpec(""" -akka.loglevel = DEBUG -akka.actor.provider = remote -akka.remote.artery.enabled = off -akka.remote.warn-about-direct-use = off -akka.remote.classic.untrusted-mode = on -akka.remote.classic.trusted-selection-paths = ["/user/receptionist", ] -akka.remote.classic.netty.tcp.port = 0 -akka.loglevel = DEBUG # test verifies debug +pekko.loglevel = DEBUG +pekko.actor.provider = remote +pekko.remote.artery.enabled = off +pekko.remote.warn-about-direct-use = off +pekko.remote.classic.untrusted-mode = on +pekko.remote.classic.trusted-selection-paths = ["/user/receptionist", ] +pekko.remote.classic.netty.tcp.port = 0 +pekko.loglevel = DEBUG # test verifies debug # test is using Java serialization and 
not priority to rewrite -akka.actor.allow-java-serialization = on -akka.actor.warn-about-java-serializer-usage = off -akka.actor.serialization-bindings { +pekko.actor.allow-java-serialization = on +pekko.actor.warn-about-java-serializer-usage = off +pekko.actor.serialization-bindings { "org.apache.pekko.actor.Terminated" = java-test } """) with ImplicitSender { @@ -85,15 +85,15 @@ akka.actor.serialization-bindings { val client = ActorSystem( "UntrustedSpec-client", ConfigFactory.parseString(""" - akka.loglevel = DEBUG - akka.actor.provider = remote - akka.remote.artery.enabled = off - akka.remote.warn-about-direct-use = off - akka.remote.classic.netty.tcp.port = 0 + pekko.loglevel = DEBUG + pekko.actor.provider = remote + pekko.remote.artery.enabled = off + pekko.remote.warn-about-direct-use = off + pekko.remote.classic.netty.tcp.port = 0 # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = off - akka.actor.warn-about-java-serializer-usage = off - akka.actor.serialization-bindings { + pekko.actor.allow-java-serialization = off + pekko.actor.warn-about-java-serializer-usage = off + pekko.actor.serialization-bindings { "org.apache.pekko.actor.Terminated" = java-test } """)) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolSpec.scala index 430b10be7d..b9e169ed77 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolSpec.scala @@ -46,10 +46,10 @@ object AkkaProtocolSpec { } @nowarn("msg=deprecated") -class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) with ImplicitSender { +class AkkaProtocolSpec extends AkkaSpec("""pekko.actor.provider = remote """) with ImplicitSender { val conf = ConfigFactory.parseString(""" - 
akka.remote { + pekko.remote { @@ -69,8 +69,8 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """).withFallback(system.settings.config) val localAddress = Address("test", "testsystem", "testhost", 1234) @@ -408,7 +408,7 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() val conf2 = - ConfigFactory.parseString("akka.remote.classic.netty.tcp.connection-timeout = 500 ms").withFallback(conf) + ConfigFactory.parseString("pekko.remote.classic.netty.tcp.connection-timeout = 500 ms").withFallback(conf) val stateActor = system.actorOf( ProtocolStateActor.outboundProps( @@ -432,7 +432,7 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val (failureDetector, _, _, handle) = collaborators val conf2 = - ConfigFactory.parseString("akka.remote.classic.netty.tcp.connection-timeout = 500 ms").withFallback(conf) + ConfigFactory.parseString("pekko.remote.classic.netty.tcp.connection-timeout = 500 ms").withFallback(conf) val reader = system.actorOf( ProtocolStateActor.inboundProps( diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolStressTest.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolStressTest.scala index 214cb2672c..f39d60eb5e 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolStressTest.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/AkkaProtocolStressTest.scala @@ -19,7 +19,7 @@ import pekko.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TimingTest, _ } object AkkaProtocolStressTest { val configA: 
Config = ConfigFactory.parseString(""" - akka { + pekko { #loglevel = DEBUG actor.provider = remote remote.artery.enabled = off @@ -43,8 +43,8 @@ object AkkaProtocolStressTest { } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) object ResendFinal diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/GenericTransportSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/GenericTransportSpec.scala index 972b39c973..7ad2ce9525 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/GenericTransportSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/GenericTransportSpec.scala @@ -21,11 +21,11 @@ import pekko.util.ByteString @nowarn("msg=deprecated") abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false) extends AkkaSpec(""" - akka.remote.artery.enabled = false - akka.actor.provider = remote + pekko.remote.artery.enabled = false + pekko.actor.provider = remote # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) with DefaultTimeout with ImplicitSender { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/SystemMessageDeliveryStressTest.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/SystemMessageDeliveryStressTest.scala index d31f775bb9..87c7f81981 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/SystemMessageDeliveryStressTest.scala +++ 
b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/SystemMessageDeliveryStressTest.scala @@ -25,7 +25,7 @@ object SystemMessageDeliveryStressTest { val burstDelay = 500.millis val baseConfig: Config = ConfigFactory.parseString(s""" - akka { + pekko { #loglevel = DEBUG remote.artery.enabled = false actor.provider = remote @@ -196,9 +196,9 @@ abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String) class SystemMessageDeliveryRetryGate extends SystemMessageDeliveryStressTest( "passive connections on", - """akka.remote.classic.retry-gate-closed-for = 0.5 s""") + """pekko.remote.classic.retry-gate-closed-for = 0.5 s""") class SystemMessageDeliveryNoPassiveRetryGate extends SystemMessageDeliveryStressTest("passive connections off", """ - akka.remote.classic.use-passive-connections = off - akka.remote.classic.retry-gate-closed-for = 0.5 s + pekko.remote.classic.use-passive-connections = off + pekko.remote.classic.retry-gate-closed-for = 0.5 s """) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/ThrottlerTransportAdapterSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/ThrottlerTransportAdapterSpec.scala index ea50770d4e..cb9f3ebcbf 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/ThrottlerTransportAdapterSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/ThrottlerTransportAdapterSpec.scala @@ -21,7 +21,7 @@ import pekko.testkit.{ AkkaSpec, DefaultTimeout, EventFilter, ImplicitSender, Te object ThrottlerTransportAdapterSpec { val configA: Config = ConfigFactory.parseString(""" - akka { + pekko { actor.provider = remote remote.artery.enabled = off @@ -35,8 +35,8 @@ object ThrottlerTransportAdapterSpec { remote.classic.netty.tcp.port = 0 } # test is using Java serialization and not priority to rewrite - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + 
pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) class Echo extends Actor { diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/netty/NettyTransportSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/netty/NettyTransportSpec.scala index bd17496b7f..2c5fa7f804 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/netty/NettyTransportSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/classic/transport/netty/NettyTransportSpec.scala @@ -21,7 +21,7 @@ import pekko.testkit.SocketUtil object NettyTransportSpec { val commonConfig = ConfigFactory.parseString(""" - akka.actor.provider = remote + pekko.actor.provider = remote """) def getInternal()(implicit sys: ActorSystem) = @@ -49,8 +49,8 @@ class NettyTransportSpec extends AnyWordSpec with Matchers with BindBehavior { "bind to a random port" in { val bindConfig = ConfigFactory.parseString(s""" - akka.remote.artery.enabled = false - akka.remote.classic.netty.tcp { + pekko.remote.artery.enabled = false + pekko.remote.classic.netty.tcp { port = 0 } """) @@ -67,8 +67,8 @@ class NettyTransportSpec extends AnyWordSpec with Matchers with BindBehavior { try { val bindConfig = ConfigFactory.parseString(s""" - akka.remote.artery.enabled = false - akka.remote.classic.netty.tcp { + pekko.remote.artery.enabled = false + pekko.remote.classic.netty.tcp { port = ${address.getPort} bind-port = 0 } @@ -95,8 +95,8 @@ class NettyTransportSpec extends AnyWordSpec with Matchers with BindBehavior { val address = SocketUtil.temporaryServerAddress(InetAddress.getLocalHost.getHostAddress, udp = false) val bindConfig = ConfigFactory.parseString(s""" - akka.remote.artery.enabled = false - akka.remote.classic.netty.tcp { + pekko.remote.artery.enabled = false + pekko.remote.classic.netty.tcp { port = 0 bind-port = ${address.getPort} } @@ -111,8 +111,8 @@ class NettyTransportSpec extends 
AnyWordSpec with Matchers with BindBehavior { "bind to all interfaces" in { val bindConfig = ConfigFactory.parseString(s""" - akka.remote.artery.enabled = false - akka.remote.classic { + pekko.remote.artery.enabled = false + pekko.remote.classic { netty.tcp.bind-hostname = "0.0.0.0" } """) @@ -137,13 +137,13 @@ trait BindBehavior { val address = SocketUtil.temporaryServerAddress() val bindConfig = ConfigFactory.parseString(s""" - akka.remote.artery.enabled = false - akka.remote.classic { + pekko.remote.artery.enabled = false + pekko.remote.classic { netty.tcp { hostname = ${address.getAddress.getHostAddress} port = ${address.getPort} } - enabled-transports = ["akka.remote.classic.netty.tcp"] + enabled-transports = ["pekko.remote.classic.netty.tcp"] } """) implicit val sys = ActorSystem("sys", bindConfig.withFallback(commonConfig)) @@ -166,8 +166,8 @@ trait BindBehavior { } val bindConfig = ConfigFactory.parseString(s""" - akka.remote.artery.enabled = false - akka.remote.classic { + pekko.remote.artery.enabled = false + pekko.remote.classic { netty.tcp { hostname = ${address.getAddress.getHostAddress} port = ${address.getPort} @@ -175,7 +175,7 @@ trait BindBehavior { bind-hostname = ${bindAddress.getAddress.getHostAddress} bind-port = ${bindAddress.getPort} } - enabled-transports = ["akka.remote.classic.netty.tcp"] + enabled-transports = ["pekko.remote.classic.netty.tcp"] } """) implicit val sys = ActorSystem("sys", bindConfig.withFallback(commonConfig)) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/AllowJavaSerializationOffSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/AllowJavaSerializationOffSpec.scala index 990a7fd172..9d0e0e95ea 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/AllowJavaSerializationOffSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/AllowJavaSerializationOffSpec.scala @@ -31,7 +31,7 @@ object AllowJavaSerializationOffSpec 
{ val bootstrapSettings = BootstrapSetup( None, Some(ConfigFactory.parseString(""" - akka { + pekko { actor { allow-java-serialization = on @@ -50,7 +50,7 @@ object AllowJavaSerializationOffSpec { val noJavaSerializationSystem = ActorSystem( "AllowJavaSerializationOffSpec" + "NoJavaSerialization", ConfigFactory.parseString(""" - akka { + pekko { actor { allow-java-serialization = off # this is by default on, but tests are running with off, use defaults here @@ -82,7 +82,7 @@ class AllowJavaSerializationOffSpec val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup( None, Some(ConfigFactory.parseString(""" - akka { + pekko { loglevel = debug actor { allow-java-serialization = off @@ -111,7 +111,7 @@ class AllowJavaSerializationOffSpec "throw if passed system to JavaSerializer has allow-java-serialization = off" in { intercept[DisabledJavaSerializer.JavaSerializationException] { new JavaSerializer(noJavaSerializationSystem.asInstanceOf[ExtendedActorSystem]) - }.getMessage should include("akka.actor.allow-java-serialization = off") + }.getMessage should include("pekko.actor.allow-java-serialization = off") intercept[DisabledJavaSerializer.JavaSerializationException] { SerializationExtension(dontAllowJavaSystem) diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala index 37dd36f0b4..c7c9fdb946 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala @@ -69,8 +69,8 @@ private[pekko] trait SerializationVerification { self: AkkaSpec => class DaemonMsgCreateSerializerAllowJavaSerializationSpec extends AkkaSpec(""" # test is verifying Java 
serialization - akka.actor.allow-java-serialization = on - akka.actor.warn-about-java-serializer-usage = off + pekko.actor.allow-java-serialization = on + pekko.actor.warn-about-java-serializer-usage = off """) with SerializationVerification { @@ -129,7 +129,7 @@ class DaemonMsgCreateSerializerAllowJavaSerializationSpec } class DaemonMsgCreateSerializerNoJavaSerializationSpec extends AkkaSpec(""" - akka.actor.allow-java-serialization=off + pekko.actor.allow-java-serialization=off """) with SerializationVerification { import DaemonMsgCreateSerializerAllowJavaSerializationSpec.MyActor diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/MiscMessageSerializerSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/MiscMessageSerializerSpec.scala index 36532f426d..0d02186647 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/MiscMessageSerializerSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/MiscMessageSerializerSpec.scala @@ -28,9 +28,9 @@ import pekko.testkit.JavaSerializable object MiscMessageSerializerSpec { val serializationTestOverrides = s""" - akka.actor { + pekko.actor { serialization-bindings = { - "org.apache.pekko.remote.serialization.MiscMessageSerializerSpec$$TestException" = akka-misc + "org.apache.pekko.remote.serialization.MiscMessageSerializerSpec$$TestException" = pekko-misc } } """ diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/ProtobufSerializerSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/ProtobufSerializerSpec.scala index 18ad0c307a..df99d0c994 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/ProtobufSerializerSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/ProtobufSerializerSpec.scala @@ -46,7 +46,7 @@ object MaliciousMessage { } class ProtobufSerializerSpec extends AkkaSpec(s""" - 
akka.serialization.protobuf.allowed-classes = [ + pekko.serialization.protobuf.allowed-classes = [ "com.google.protobuf.GeneratedMessage", "com.google.protobuf.GeneratedMessageV3", "scalapb.GeneratedMessageCompanion", diff --git a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/SerializationTransportInformationSpec.scala b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/SerializationTransportInformationSpec.scala index a80f340a40..935318d47d 100644 --- a/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/SerializationTransportInformationSpec.scala +++ b/akka-remote/src/test/scala/org/apache/pekko/remote/serialization/SerializationTransportInformationSpec.scala @@ -75,7 +75,7 @@ object SerializationTransportInformationSpec { abstract class AbstractSerializationTransportInformationSpec(config: Config) extends AkkaSpec(config.withFallback( ConfigFactory.parseString(""" - akka { + pekko { loglevel = info actor { provider = remote @@ -133,8 +133,8 @@ abstract class AbstractSerializationTransportInformationSpec(config: Config) class SerializationTransportInformationSpec extends AbstractSerializationTransportInformationSpec(ConfigFactory.parseString(""" - akka.remote.artery.enabled = off - akka.remote.classic.netty.tcp { + pekko.remote.artery.enabled = off + pekko.remote.classic.netty.tcp { hostname = localhost port = 0 } diff --git a/akka-serialization-jackson/src/main/resources/reference.conf b/akka-serialization-jackson/src/main/resources/reference.conf index e99e465149..25fe23ce5a 100644 --- a/akka-serialization-jackson/src/main/resources/reference.conf +++ b/akka-serialization-jackson/src/main/resources/reference.conf @@ -1,18 +1,18 @@ ########################################## -# Akka Serialization Jackson Config File # +# Pekko Serialization Jackson Config File # ########################################## # This is the reference config file that contains all the default settings. 
# Make your edits/overrides in your application.conf. #//#jackson-modules -akka.serialization.jackson { +pekko.serialization.jackson { # The Jackson JSON serializer will register these modules. jackson-modules += "org.apache.pekko.serialization.jackson.AkkaJacksonModule" - # AkkaTypedJacksonModule optionally included if akka-actor-typed is in classpath + # AkkaTypedJacksonModule optionally included if pekko-actor-typed is in classpath jackson-modules += "org.apache.pekko.serialization.jackson.AkkaTypedJacksonModule" - # AkkaStreamsModule optionally included if akka-streams is in classpath + # AkkaStreamsModule optionally included if pekko-streams is in classpath jackson-modules += "org.apache.pekko.serialization.jackson.AkkaStreamJacksonModule" jackson-modules += "com.fasterxml.jackson.module.paramnames.ParameterNamesModule" jackson-modules += "com.fasterxml.jackson.datatype.jdk8.Jdk8Module" @@ -21,8 +21,8 @@ akka.serialization.jackson { } #//#jackson-modules -akka.serialization.jackson { - # When enabled and akka.loglevel=DEBUG serialization time and payload size +pekko.serialization.jackson { + # When enabled and pekko.loglevel=DEBUG serialization time and payload size # is logged for each messages. verbose-debug-logging = off @@ -36,7 +36,7 @@ akka.serialization.jackson { } #//#features -akka.serialization.jackson { +pekko.serialization.jackson { # Configuration of the ObjectMapper serialization features. # See com.fasterxml.jackson.databind.SerializationFeature # Enum values corresponding to the SerializationFeature and their boolean value. @@ -147,7 +147,7 @@ akka.serialization.jackson { # Additional classes that are allowed even if they are not defined in `serialization-bindings`. # This is useful when a class is not used for serialization any more and therefore removed # from `serialization-bindings`, but should still be possible to deserialize. 
- allowed-class-prefix = ${akka.serialization.jackson.whitelist-class-prefix} + allowed-class-prefix = ${pekko.serialization.jackson.whitelist-class-prefix} # settings for compression of the payload @@ -169,8 +169,8 @@ akka.serialization.jackson { # concrete class, or if it is a supertype that uses Jackson polymorphism (ie, the # @JsonTypeInfo annotation) to store type information in the JSON itself. The intention behind # disabling this is to remove extraneous type information (ie, fully qualified class names) when - # serialized objects are persisted in Akka persistence or replicated using Akka distributed - # data. Note that Akka remoting already has manifest compression optimizations that address this, + # serialized objects are persisted in Pekko persistence or replicated using Pekko distributed + # data. Note that Pekko remoting already has manifest compression optimizations that address this, # so for types that just get sent over remoting, this offers no optimization. type-in-manifest = on @@ -186,23 +186,23 @@ akka.serialization.jackson { deserialization-type = "" # Specific settings for jackson-json binding can be defined in this section to - # override the settings in 'akka.serialization.jackson' + # override the settings in 'pekko.serialization.jackson' jackson-json {} # Specific settings for jackson-cbor binding can be defined in this section to - # override the settings in 'akka.serialization.jackson' + # override the settings in 'pekko.serialization.jackson' jackson-cbor {} # Issue #28918 for compatibility with data serialized with JacksonCborSerializer in - # Akka 2.6.4 or earlier, which was plain JSON format. - jackson-cbor-264 = ${akka.serialization.jackson.jackson-cbor} + # Akka 2.6.4 or earlier, which was plain JSON format. 
+ jackson-cbor-264 = ${pekko.serialization.jackson.jackson-cbor} } #//#features #//#compression # Compression settings for the jackson-json binding -akka.serialization.jackson.jackson-json.compression { +pekko.serialization.jackson.jackson-json.compression { # Compression algorithm. # - off : no compression # - gzip : using common java gzip @@ -215,13 +215,13 @@ akka.serialization.jackson.jackson-json.compression { } #//#compression -akka.actor { +pekko.actor { serializers { jackson-json = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" jackson-cbor = "org.apache.pekko.serialization.jackson.JacksonCborSerializer" # Issue #28918 for compatibility with data serialized with JacksonCborSerializer in - # Akka 2.6.4 or earlier, which was plain JSON format. + # Akka 2.6.4 or earlier, which was plain JSON format. jackson-cbor-264 = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" } serialization-identifiers { @@ -229,7 +229,7 @@ akka.actor { jackson-cbor = 33 # Issue #28918 for compatibility with data serialized with JacksonCborSerializer in - # Akka 2.6.4 or earlier, which was plain JSON format. + # Akka 2.6.4 or earlier, which was plain JSON format. jackson-cbor-264 = 32 } serialization-bindings { diff --git a/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonObjectMapperProvider.scala b/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonObjectMapperProvider.scala index 8b6af1343f..aa1e2d285c 100644 --- a/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonObjectMapperProvider.scala +++ b/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonObjectMapperProvider.scala @@ -59,8 +59,8 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid * The configuration for a given `bindingName`. 
*/ def configForBinding(bindingName: String, systemConfig: Config): Config = { - val basePath = "akka.serialization.jackson" - val baseConf = systemConfig.getConfig("akka.serialization.jackson") + val basePath = "pekko.serialization.jackson" + val baseConf = systemConfig.getConfig("pekko.serialization.jackson") if (systemConfig.hasPath(s"$basePath.$bindingName")) systemConfig.getConfig(s"$basePath.$bindingName").withFallback(baseConf) else @@ -221,7 +221,7 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid e, s"Could not load configured Jackson module [$fqcn], " + "please verify classpath dependencies or amend the configuration " + - "[akka.serialization.jackson-modules]. Continuing without this module.")) + "[pekko.serialization.jackson-modules]. Continuing without this module.")) None } } else @@ -304,7 +304,7 @@ final class JacksonObjectMapperProvider(system: ExtendedActorSystem) extends Ext * creates a new instance. * * The `ObjectMapper` is created with sensible defaults and modules configured - * in `akka.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] + * in `pekko.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] * if the `ActorSystem` is started with such [[pekko.actor.setup.ActorSystemSetup]]. * * The returned `ObjectMapper` must not be modified, because it may already be in use and such @@ -323,7 +323,7 @@ final class JacksonObjectMapperProvider(system: ExtendedActorSystem) extends Ext * creates a new instance. * * The `ObjectMapper` is created with sensible defaults and modules configured - * in `akka.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] + * in `pekko.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] * if the `ActorSystem` is started with such [[pekko.actor.setup.ActorSystemSetup]]. 
* * The returned `ObjectMapper` must not be modified, because it may already be in use and such @@ -338,7 +338,7 @@ final class JacksonObjectMapperProvider(system: ExtendedActorSystem) extends Ext /** * Scala API: Creates a new instance of a Jackson `ObjectMapper` with sensible defaults and modules configured - * in `akka.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] + * in `pekko.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] * if the `ActorSystem` is started with such [[pekko.actor.setup.ActorSystemSetup]]. * * @param bindingName name of this `ObjectMapper` @@ -362,7 +362,7 @@ final class JacksonObjectMapperProvider(system: ExtendedActorSystem) extends Ext /** * Java API: Creates a new instance of a Jackson `ObjectMapper` with sensible defaults and modules configured - * in `akka.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] + * in `pekko.serialization.jackson.jackson-modules`. It's using [[JacksonObjectMapperProviderSetup]] * if the `ActorSystem` is started with such [[pekko.actor.setup.ActorSystemSetup]]. 
* * @param bindingName name of this `ObjectMapper` @@ -427,7 +427,7 @@ class JacksonObjectMapperFactory { * * @param bindingName bindingName name of this `ObjectMapper` * @param configuredModules the list of `Modules` that were configured in - * `akka.serialization.jackson.deserialization-features` + * `pekko.serialization.jackson.deserialization-features` */ def overrideConfiguredModules( @unused bindingName: String, @@ -444,7 +444,7 @@ class JacksonObjectMapperFactory { * * @param bindingName bindingName name of this `ObjectMapper` * @param configuredFeatures the list of `SerializationFeature` that were configured in - * `akka.serialization.jackson.serialization-features` + * `pekko.serialization.jackson.serialization-features` */ def overrideConfiguredSerializationFeatures( @unused bindingName: String, @@ -462,7 +462,7 @@ class JacksonObjectMapperFactory { * * @param bindingName bindingName name of this `ObjectMapper` * @param configuredFeatures the list of `DeserializationFeature` that were configured in - * `akka.serialization.jackson.deserialization-features` + * `pekko.serialization.jackson.deserialization-features` */ def overrideConfiguredDeserializationFeatures( @unused bindingName: String, @@ -476,7 +476,7 @@ class JacksonObjectMapperFactory { * return the features that are to be applied to the `ObjectMapper`. * * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `MapperFeatures` that were configured in `akka.serialization.jackson.mapper-features` + * @param configuredFeatures the list of `MapperFeatures` that were configured in `pekko.serialization.jackson.mapper-features` */ def overrideConfiguredMapperFeatures( @unused bindingName: String, @@ -489,7 +489,7 @@ class JacksonObjectMapperFactory { * return the features that are to be applied to the `ObjectMapper`. 
* * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `JsonParser.Feature` that were configured in `akka.serialization.jackson.json-parser-features` + * @param configuredFeatures the list of `JsonParser.Feature` that were configured in `pekko.serialization.jackson.json-parser-features` */ def overrideConfiguredJsonParserFeatures( @unused bindingName: String, @@ -502,7 +502,7 @@ class JacksonObjectMapperFactory { * return the features that are to be applied to the `ObjectMapper`. * * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `JsonGenerator.Feature` that were configured in `akka.serialization.jackson.json-generator-features` + * @param configuredFeatures the list of `JsonGenerator.Feature` that were configured in `pekko.serialization.jackson.json-generator-features` */ def overrideConfiguredJsonGeneratorFeatures( @unused bindingName: String, @@ -516,7 +516,7 @@ class JacksonObjectMapperFactory { * that are to be applied to the `JsonFactoryBuilder`. * * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `StreamReadFeature` that were configured in `akka.serialization.jackson.stream-read-features` + * @param configuredFeatures the list of `StreamReadFeature` that were configured in `pekko.serialization.jackson.stream-read-features` */ def overrideConfiguredStreamReadFeatures( @unused bindingName: String, @@ -529,7 +529,7 @@ class JacksonObjectMapperFactory { * that are to be applied to the `JsonFactoryBuilder`. 
* * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `StreamWriterFeature` that were configured in `akka.serialization.jackson.stream-write-features` + * @param configuredFeatures the list of `StreamWriterFeature` that were configured in `pekko.serialization.jackson.stream-write-features` */ def overrideConfiguredStreamWriteFeatures( @unused bindingName: String, @@ -542,7 +542,7 @@ class JacksonObjectMapperFactory { * that are to be applied to the `JsonFactoryBuilder`. * * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `JsonReadFeature` that were configured in `akka.serialization.jackson.json-read-features` + * @param configuredFeatures the list of `JsonReadFeature` that were configured in `pekko.serialization.jackson.json-read-features` */ def overrideConfiguredJsonReadFeatures( @unused bindingName: String, @@ -555,7 +555,7 @@ class JacksonObjectMapperFactory { * that are to be applied to the `JsonFactoryBuilder`. 
* * @param bindingName bindingName name of this `ObjectMapper` - * @param configuredFeatures the list of `JsonWriteFeature` that were configured in `akka.serialization.jackson.json-write-features` + * @param configuredFeatures the list of `JsonWriteFeature` that were configured in `pekko.serialization.jackson.json-write-features` */ def overrideConfiguredJsonWriteFeatures( @unused bindingName: String, @@ -569,7 +569,7 @@ class JacksonObjectMapperFactory { * * @param bindingName bindingName name of this `ObjectMapper` * @param configuredFeatures the list of `PropertyAccessor`/`JsonAutoDetect.Visibility` that were configured in - * `akka.serialization.jackson.visibility` + * `pekko.serialization.jackson.visibility` */ def overrideConfiguredVisibility( @unused bindingName: String, diff --git a/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializer.scala b/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializer.scala index 3db8c70f18..04e954e3e8 100644 --- a/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializer.scala +++ b/akka-serialization-jackson/src/main/scala/org/apache/pekko/serialization/jackson/JacksonSerializer.scala @@ -162,7 +162,7 @@ import pekko.util.OptionVal /** * INTERNAL API: Base class for Jackson serializers. * - * Configuration in `akka.serialization.jackson` section. + * Configuration in `pekko.serialization.jackson` section. * It will load Jackson modules defined in configuration `jackson-modules`. * * It will compress the payload if the compression `algorithm` is enabled and the the @@ -426,8 +426,8 @@ import pekko.util.OptionVal } else if (!isInAllowList(clazz)) { val warnMsg = s"Can't serialize/deserialize object of type [${clazz.getName}] in [${getClass.getName}]. " + "Only classes that are listed as allowed are allowed for security reasons. 
" + - "Configure allowed classes with akka.actor.serialization-bindings or " + - "akka.serialization.jackson.allowed-class-prefix." + "Configure allowed classes with pekko.actor.serialization-bindings or " + + "pekko.serialization.jackson.allowed-class-prefix." log.warning(LogMarker.Security, warnMsg) throw new IllegalArgumentException(warnMsg) } @@ -487,7 +487,7 @@ import pekko.util.OptionVal if (!isBindingOk(clazz)) { val warnMsg = "For security reasons it's not allowed to bind open-ended interfaces like " + s"[${clazz.getName}] to [${getClass.getName}]. " + - "Change your akka.actor.serialization-bindings configuration." + "Change your pekko.actor.serialization-bindings configuration." log.warning(LogMarker.Security, warnMsg) throw new IllegalArgumentException(warnMsg) } diff --git a/akka-serialization-jackson/src/test/resources/reference.conf b/akka-serialization-jackson/src/test/resources/reference.conf index 798b07f70f..9964158067 100644 --- a/akka-serialization-jackson/src/test/resources/reference.conf +++ b/akka-serialization-jackson/src/test/resources/reference.conf @@ -1,4 +1,4 @@ -akka { +pekko { actor { serialization-bindings { "org.apache.pekko.serialization.jackson.CborSerializable" = jackson-cbor diff --git a/akka-serialization-jackson/src/test/scala/doc/org/apache/pekko/serialization/jackson/SerializationDocSpec.scala b/akka-serialization-jackson/src/test/scala/doc/org/apache/pekko/serialization/jackson/SerializationDocSpec.scala index a6c181a8d6..414fb52045 100644 --- a/akka-serialization-jackson/src/test/scala/doc/org/apache/pekko/serialization/jackson/SerializationDocSpec.scala +++ b/akka-serialization-jackson/src/test/scala/doc/org/apache/pekko/serialization/jackson/SerializationDocSpec.scala @@ -35,7 +35,7 @@ final case class Message(name: String, nr: Int) extends MySerializable object SerializationDocSpec { val config = """ #//#serialization-bindings - akka.actor { + pekko.actor { serialization-bindings { "com.myservice.MySerializable" = 
jackson-json } @@ -45,7 +45,7 @@ object SerializationDocSpec { val configMigration = """ #//#migrations-conf - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "com.myservice.event.ItemAdded" = "com.myservice.event.ItemAddedMigration" } #//#migrations-conf @@ -53,7 +53,7 @@ object SerializationDocSpec { val configMigrationRenamClass = """ #//#migrations-conf-rename - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "com.myservice.event.OrderAdded" = "com.myservice.event.OrderPlacedMigration" } #//#migrations-conf-rename @@ -61,12 +61,12 @@ object SerializationDocSpec { val configSpecific = """ #//#specific-config - akka.serialization.jackson.jackson-json { + pekko.serialization.jackson.jackson-json { serialization-features { WRITE_DATES_AS_TIMESTAMPS = off } } - akka.serialization.jackson.jackson-cbor { + pekko.serialization.jackson.jackson-cbor { serialization-features { WRITE_DATES_AS_TIMESTAMPS = on } @@ -76,7 +76,7 @@ object SerializationDocSpec { val configSeveral = """ #//#several-config - akka.actor { + pekko.actor { serializers { jackson-json-message = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" jackson-json-event = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" @@ -90,7 +90,7 @@ object SerializationDocSpec { "com.myservice.MyEvent" = jackson-json-event } } - akka.serialization.jackson { + pekko.serialization.jackson { jackson-json-message { serialization-features { WRITE_DATES_AS_TIMESTAMPS = on @@ -107,7 +107,7 @@ object SerializationDocSpec { val configManifestless = """ #//#manifestless - akka.actor { + pekko.actor { serializers { jackson-json-event = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" } @@ -118,7 +118,7 @@ object SerializationDocSpec { "com.myservice.MyEvent" = jackson-json-event } } - akka.serialization.jackson { + pekko.serialization.jackson { jackson-json-event { type-in-manifest = off # Since there is exactly one 
serialization binding declared for this @@ -178,7 +178,7 @@ object SerializationDocSpec { val configDateTime = """ #//#date-time - akka.serialization.jackson.serialization-features { + pekko.serialization.jackson.serialization-features { WRITE_DATES_AS_TIMESTAMPS = on WRITE_DURATIONS_AS_TIMESTAMPS = on } @@ -187,7 +187,7 @@ object SerializationDocSpec { val configAllowList = """ #//#allowed-class-prefix - akka.serialization.jackson.allowed-class-prefix = + pekko.serialization.jackson.allowed-class-prefix = ["com.myservice.event.OrderAdded", "com.myservice.command"] #//#allowed-class-prefix """ @@ -199,7 +199,7 @@ class SerializationDocSpec ActorSystem( "SerializationDocSpec", ConfigFactory.parseString(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { # migrations for Java classes "jdoc.org.apache.pekko.serialization.jackson.v2b.ItemAdded" = "jdoc.org.apache.pekko.serialization.jackson.v2b.ItemAddedMigration" "jdoc.org.apache.pekko.serialization.jackson.v2c.ItemAdded" = "jdoc.org.apache.pekko.serialization.jackson.v2c.ItemAddedMigration" @@ -212,7 +212,7 @@ class SerializationDocSpec "doc.org.apache.pekko.serialization.jackson.v2a.Customer" = "doc.org.apache.pekko.serialization.jackson.v2a.CustomerMigration" "doc.org.apache.pekko.serialization.jackson.v1.OrderAdded" = "doc.org.apache.pekko.serialization.jackson.v2a.OrderPlacedMigration" } - akka.actor { + pekko.actor { allow-java-serialization = off serialization-bindings { "${classOf[jdoc.org.apache.pekko.serialization.jackson.MySerializable].getName}" = jackson-json diff --git a/akka-serialization-jackson/src/test/scala/org/apache/pekko/serialization/jackson/JacksonSerializerSpec.scala b/akka-serialization-jackson/src/test/scala/org/apache/pekko/serialization/jackson/JacksonSerializerSpec.scala index 168016d1b5..c04e59d1c6 100644 --- a/akka-serialization-jackson/src/test/scala/org/apache/pekko/serialization/jackson/JacksonSerializerSpec.scala +++ 
b/akka-serialization-jackson/src/test/scala/org/apache/pekko/serialization/jackson/JacksonSerializerSpec.scala @@ -201,9 +201,9 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "JacksonSerializer configuration" must { withSystem(""" - akka.actor.serializers.jackson-json2 = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" - akka.actor.serialization-identifiers.jackson-json2 = 999 - akka.serialization.jackson.jackson-json2 { + pekko.actor.serializers.jackson-json2 = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" + pekko.actor.serialization-identifiers.jackson-json2 = 999 + pekko.serialization.jackson.jackson-json2 { # on is Jackson's default serialization-features.WRITE_DURATIONS_AS_TIMESTAMPS = off @@ -366,7 +366,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { // see SerializationFeature.WRITE_DATES_AS_TIMESTAMPS = on "be possible to serialize dates and durations as numeric timestamps" in { withSystem(""" - akka.serialization.jackson.serialization-features { + pekko.serialization.jackson.serialization-features { WRITE_DATES_AS_TIMESTAMPS = on WRITE_DURATIONS_AS_TIMESTAMPS = on } @@ -416,12 +416,12 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "be possible to tune the visibility at ObjectMapper level (FIELD, PUBLIC_ONLY)" in { withSystem(""" - akka.actor { + pekko.actor { serialization-bindings { "org.apache.pekko.serialization.jackson.JavaTestMessages$ClassWithVisibility" = jackson-json } } - akka.serialization.jackson.visibility { + pekko.serialization.jackson.visibility { FIELD = PUBLIC_ONLY } """) { sys => @@ -436,12 +436,12 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { // (that is "FIELD = ANY") stays consistent "be possible to tune the visibility at ObjectMapper level (Akka default)" in { withSystem(""" - akka.actor { + pekko.actor { serialization-bindings { 
"org.apache.pekko.serialization.jackson.JavaTestMessages$ClassWithVisibility" = jackson-json } } - akka.serialization.jackson.visibility { + pekko.serialization.jackson.visibility { ## No overrides } """) { sys => @@ -591,7 +591,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { } "compress large payload with lz4" in withSystem(""" - akka.serialization.jackson.jackson-json.compression { + pekko.serialization.jackson.jackson-json.compression { algorithm = lz4 compress-larger-than = 32 KiB } @@ -608,7 +608,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { } "not compress small payload with lz4" in withSystem(""" - akka.serialization.jackson.jackson-json.compression { + pekko.serialization.jackson.jackson-json.compression { algorithm = lz4 compress-larger-than = 32 KiB } @@ -631,14 +631,14 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "deserialize messages using the serialization bindings" in withSystem( """ - akka.actor { + pekko.actor { serializers.animal = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" serialization-identifiers.animal = 9091 serialization-bindings { "org.apache.pekko.serialization.jackson.ScalaTestMessages$Animal" = animal } } - akka.serialization.jackson.animal.type-in-manifest = off + pekko.serialization.jackson.animal.type-in-manifest = off """) { sys => val msg = Elephant("Dumbo", 1) val serializer = serializerFor(msg, sys) @@ -650,7 +650,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "deserialize messages using the configured deserialization type" in withSystem( """ - akka.actor { + pekko.actor { serializers.animal = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" serialization-identifiers.animal = 9091 serialization-bindings { @@ -658,7 +658,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { 
"org.apache.pekko.serialization.jackson.ScalaTestMessages$Lion" = animal } } - akka.serialization.jackson.animal { + pekko.serialization.jackson.animal { type-in-manifest = off deserialization-type = "org.apache.pekko.serialization.jackson.ScalaTestMessages$Animal" } @@ -674,7 +674,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "fail if multiple serialization bindings are declared with no deserialization type" in { an[IllegalArgumentException] should be thrownBy { withSystem(""" - akka.actor { + pekko.actor { serializers.animal = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" serialization-identifiers.animal = 9091 serialization-bindings { @@ -682,7 +682,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "org.apache.pekko.serialization.jackson.ScalaTestMessages$Lion" = animal } } - akka.serialization.jackson.animal { + pekko.serialization.jackson.animal { type-in-manifest = off } """)(sys => checkSerialization(Elephant("Dumbo", 1), sys)) @@ -704,15 +704,15 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { object JacksonSerializerSpec { def baseConfig(serializerName: String): String = s""" - akka.actor { + pekko.actor { serialization-bindings { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$TestMessage" = $serializerName "org.apache.pekko.serialization.jackson.JavaTestMessages$$TestMessage" = $serializerName } } - akka.serialization.jackson.allowed-class-prefix = ["org.apache.pekko.serialization.jackson.ScalaTestMessages$$OldCommand"] + pekko.serialization.jackson.allowed-class-prefix = ["org.apache.pekko.serialization.jackson.ScalaTestMessages$$OldCommand"] - akka.actor { + pekko.actor { serializers { inner-serializer = "org.apache.pekko.serialization.jackson.ScalaTestMessages$$InnerSerializationSerializer" } @@ -862,7 +862,7 @@ abstract class JacksonSerializerSpec(serializerName: String) // TODO: Consider moving the migrations Specs 
to a separate Spec "deserialize with migrations" in withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { ## Usually the key is a FQCN but we're hacking the name to use multiple migrations for the ## same type in a single test. "deserialize-Java.Event1-into-Java.Event3" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV3" @@ -889,7 +889,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val manifest = serializer.manifest(event1) withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event1" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2" "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2" } @@ -909,7 +909,7 @@ abstract class JacksonSerializerSpec(serializerName: String) "use the migration's currentVersion on new serializations" in { withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV2 => @@ -922,7 +922,7 @@ abstract class JacksonSerializerSpec(serializerName: String) "use the migration's currentVersion on new serializations when supporting forward versions" in { withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2WithV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV2 => @@ -937,7 +937,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val blobV3 = withSystem(s""" - akka.serialization.jackson.migrations { + 
pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event3" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV3 => @@ -949,7 +949,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val blobV2 = withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2WithV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV2WithV3 => @@ -962,7 +962,7 @@ abstract class JacksonSerializerSpec(serializerName: String) } withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event3" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV3 => @@ -976,7 +976,7 @@ abstract class JacksonSerializerSpec(serializerName: String) "deserialize unsupported versions throws an exception" in { intercept[lang.IllegalStateException] { withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event1" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2" "org.apache.pekko.serialization.jackson.JavaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.JavaTestEventMigrationV2" } @@ -1088,7 +1088,7 @@ abstract class JacksonSerializerSpec(serializerName: String) // TODO: Consider moving the migrations Specs to a separate Spec "deserialize with migrations" in withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { ## Usually the key is a FQCN but we're hacking the name to use multiple migrations for the ## same type in a single test. 
"deserialize-Event1-into-Event3" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV3" @@ -1115,7 +1115,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val manifest = serializer.manifest(event1) withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event1" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV2" "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV2" } @@ -1135,7 +1135,7 @@ abstract class JacksonSerializerSpec(serializerName: String) "use the migration's currentVersion on new serializations" in { withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV2" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV2 => @@ -1150,7 +1150,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val blobV3 = withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event3" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV3 => @@ -1162,7 +1162,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val blobV2 = withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV2WithV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV2WithV3 => @@ -1175,7 +1175,7 @@ abstract class JacksonSerializerSpec(serializerName: String) } withSystem(s""" - 
akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event3" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV3" } """ + JacksonSerializerSpec.baseConfig(serializerName)) { sysV3 => @@ -1189,7 +1189,7 @@ abstract class JacksonSerializerSpec(serializerName: String) "deserialize unsupported versions throws an exception" in { intercept[lang.IllegalStateException] { withSystem(s""" - akka.serialization.jackson.migrations { + pekko.serialization.jackson.migrations { "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event1" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV2" "org.apache.pekko.serialization.jackson.ScalaTestMessages$$Event2" = "org.apache.pekko.serialization.jackson.ScalaTestEventMigrationV2" } @@ -1256,7 +1256,7 @@ abstract class JacksonSerializerSpec(serializerName: String) val sys = ActorSystem( system.name, ConfigFactory.parseString(s""" - akka.actor.serialization-bindings { + pekko.actor.serialization-bindings { "$className" = $serializerName "org.apache.pekko.serialization.jackson.ScalaTestMessages$$TestMessage" = $serializerName } diff --git a/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggerSpec.scala b/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggerSpec.scala index eae72b7860..6412550b37 100644 --- a/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggerSpec.scala +++ b/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggerSpec.scala @@ -23,7 +23,7 @@ object Slf4jLoggerSpec { // This test depends on logback configuration in src/test/resources/logback-test.xml val config = """ - akka { + pekko { loglevel = INFO loggers = ["org.apache.pekko.event.slf4j.Slf4jLogger"] logger-startup-timeout = 30s @@ -83,7 +83,7 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft output.reset() } - val sourceThreadRegex = 
"sourceThread=Slf4jLoggerSpec-akka.actor.default-dispatcher-[1-9][0-9]*" + val sourceThreadRegex = "sourceThread=Slf4jLoggerSpec-pekko.actor.default-dispatcher-[1-9][0-9]*" "Slf4jLogger" must { diff --git a/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggingFilterSpec.scala b/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggingFilterSpec.scala index 8c94688be7..7491ebafe7 100644 --- a/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggingFilterSpec.scala +++ b/akka-slf4j/src/test/scala/org/apache/pekko/event/slf4j/Slf4jLoggingFilterSpec.scala @@ -26,7 +26,7 @@ object Slf4jLoggingFilterSpec { // This test depends on logback configuration in src/test/resources/logback-test.xml val config = """ - akka { + pekko { loglevel = DEBUG # test verifies debug loggers = ["org.apache.pekko.event.slf4j.Slf4jLoggingFilterSpec$TestLogger"] logging-filter = "org.apache.pekko.event.slf4j.Slf4jLoggingFilter" diff --git a/akka-stream-testkit/src/main/resources/reference.conf b/akka-stream-testkit/src/main/resources/reference.conf index 7a99c683ad..74e194fcf3 100644 --- a/akka-stream-testkit/src/main/resources/reference.conf +++ b/akka-stream-testkit/src/main/resources/reference.conf @@ -1,3 +1,3 @@ -akka.stream.testkit { +pekko.stream.testkit { all-stages-stopped-timeout = 5 s } diff --git a/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala index 3c86cdf547..eaf06047ef 100644 --- a/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala @@ -164,7 +164,7 @@ object TestPublisher { /** * Expect no messages. - * Waits for the default period configured as `akka.actor.testkit.expect-no-message-default`. + * Waits for the default period configured as `pekko.actor.testkit.expect-no-message-default`. 
*/ def expectNoMessage(): Self = executeAfterSubscription { probe.expectNoMessage() @@ -200,7 +200,7 @@ object TestPublisher { * the remaining time governed by the innermost enclosing `within` block. * * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. * * {{{ * val ret = within(50 millis) { @@ -672,8 +672,8 @@ object TestSubscriber { * Fluent DSL * * Assert that no message is received for the specified time. - * Waits for the default period configured as `akka.test.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.test.timefactor". + * Waits for the default period configured as `pekko.test.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.test.timefactor". */ def expectNoMessage(): Self = { probe.expectNoMessage() @@ -787,7 +787,7 @@ object TestSubscriber { * the remaining time governed by the innermost enclosing `within` block. * * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. 
* * {{{ * val ret = within(50 millis) { diff --git a/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/scaladsl/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/scaladsl/StreamTestKit.scala index bdb4dba65a..af20d64cfa 100644 --- a/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/scaladsl/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/scaladsl/StreamTestKit.scala @@ -46,7 +46,7 @@ object StreamTestKit { /** INTERNAL API */ @InternalApi private[testkit] def assertNoChildren(sys: ActorSystem, supervisor: ActorRef): Unit = { val probe = TestProbe()(sys) - val c = sys.settings.config.getConfig("akka.stream.testkit") + val c = sys.settings.config.getConfig("pekko.stream.testkit") val timeout = c.getDuration("all-stages-stopped-timeout", MILLISECONDS).millis probe.within(timeout) { try probe.awaitAssert { diff --git a/akka-stream-testkit/src/test/resources/reference.conf b/akka-stream-testkit/src/test/resources/reference.conf index aa4e19d659..5712cfbe60 100644 --- a/akka-stream-testkit/src/test/resources/reference.conf +++ b/akka-stream-testkit/src/test/resources/reference.conf @@ -1,12 +1,12 @@ # The StreamTestDefaultMailbox verifies that stream actors are using the dispatcher defined in ActorMaterializerSettings. 
# -# All stream tests should use the dedicated `akka.test.stream-dispatcher` or disable this validation by defining: -# akka.actor.default-mailbox.mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox" -akka.actor.default-mailbox.mailbox-type = "org.apache.pekko.stream.testkit.StreamTestDefaultMailbox" +# All stream tests should use the dedicated `pekko.test.stream-dispatcher` or disable this validation by defining: +# pekko.actor.default-mailbox.mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox" +pekko.actor.default-mailbox.mailbox-type = "org.apache.pekko.stream.testkit.StreamTestDefaultMailbox" # Dispatcher for stream actors. Specified in tests with -# ActorMaterializerSettings(dispatcher = "akka.test.stream-dispatcher") -akka.test.stream-dispatcher { +# ActorMaterializerSettings(dispatcher = "pekko.test.stream-dispatcher") +pekko.test.stream-dispatcher { type = Dispatcher executor = "fork-join-executor" fork-join-executor { @@ -16,8 +16,8 @@ akka.test.stream-dispatcher { mailbox-requirement = "org.apache.pekko.dispatch.UnboundedMessageQueueSemantics" } -akka.stream { +pekko.stream { materializer { - dispatcher = "akka.test.stream-dispatcher" + dispatcher = "pekko.test.stream-dispatcher" } } diff --git a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/BaseTwoStreamsSetup.scala b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/BaseTwoStreamsSetup.scala index cbd8450ac4..4c302463e7 100644 --- a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/BaseTwoStreamsSetup.scala +++ b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/BaseTwoStreamsSetup.scala @@ -15,8 +15,8 @@ import pekko.stream.testkit.scaladsl.StreamTestKit._ import pekko.testkit.AkkaSpec abstract class BaseTwoStreamsSetup extends AkkaSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + 
pekko.stream.materializer.max-input-buffer-size = 2 """) { val TestException = new RuntimeException("test") with NoStackTrace diff --git a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/StreamTestKitSpec.scala b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/StreamTestKitSpec.scala index 500804274e..31d2e749d3 100644 --- a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/StreamTestKitSpec.scala +++ b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/StreamTestKitSpec.scala @@ -125,7 +125,7 @@ class StreamTestKitSpec extends AkkaSpec { val timeout = 100.millis // Initial delay is longer than the timeout so an exception will be thrown. // It also needs to be dilated since the testkit will dilate the timeout - // accordingly to `-Dakka.test.timefactor` value. + // accordingly to `-Dpekko.test.timefactor` value. val initialDelay = (timeout * 2).dilated Source .tick(initialDelay, 1.millis, 1) @@ -169,7 +169,7 @@ class StreamTestKitSpec extends AkkaSpec { val timeout = 100.millis // Initial delay is longer than the timeout so an exception will be thrown. // It also needs to be dilated since the testkit will dilate the timeout - // accordingly to `-Dakka.test.timefactor` value. + // accordingly to `-Dpekko.test.timefactor` value. 
val initialDelay = (timeout * 2).dilated Source .tick(initialDelay, 1.millis, 1) diff --git a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/TestPublisherSubscriberSpec.scala b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/TestPublisherSubscriberSpec.scala index c22e5ce546..60228a93e8 100644 --- a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/TestPublisherSubscriberSpec.scala +++ b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/TestPublisherSubscriberSpec.scala @@ -15,8 +15,8 @@ import pekko.stream.testkit.scaladsl.StreamTestKit._ import pekko.testkit.AkkaSpec class TestPublisherSubscriberSpec extends AkkaSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "TestPublisher and TestSubscriber" must { diff --git a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/Utils.scala b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/Utils.scala index f904a5ab6b..a0e16ef78d 100644 --- a/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/Utils.scala +++ b/akka-stream-testkit/src/test/scala/org/apache/pekko/stream/testkit/Utils.scala @@ -17,7 +17,7 @@ object Utils { /** Sets the default-mailbox to the usual [[pekko.dispatch.UnboundedMailbox]] instead of [[StreamTestDefaultMailbox]]. 
*/ val UnboundedMailboxConfig = ConfigFactory.parseString( - """akka.actor.default-mailbox.mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox"""") + """pekko.actor.default-mailbox.mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox"""") case class TE(message: String) extends RuntimeException(message) with NoStackTrace diff --git a/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/AkkaPublisherVerification.scala b/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/AkkaPublisherVerification.scala index a650622bf8..29f7fb021a 100644 --- a/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/AkkaPublisherVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/AkkaPublisherVerification.scala @@ -22,8 +22,8 @@ abstract class AkkaPublisherVerification[T](val env: TestEnvironment, publisherS override def additionalConfig: Config = ConfigFactory.parseString(""" - akka.stream.materializer.initial-input-buffer-size = 512 - akka.stream.materializer.max-input-buffer-size = 512 + pekko.stream.materializer.initial-input-buffer-size = 512 + pekko.stream.materializer.max-input-buffer-size = 512 """) def this(printlnDebug: Boolean) = diff --git a/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/InputStreamSourceTest.scala b/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/InputStreamSourceTest.scala index 1cf5d11f70..6b5a62bd96 100644 --- a/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/InputStreamSourceTest.scala +++ b/akka-stream-tests-tck/src/test/scala/org/apache/pekko/stream/tck/InputStreamSourceTest.scala @@ -25,7 +25,7 @@ class InputStreamSourceTest extends AkkaPublisherVerification[ByteString] { num } }) - .withAttributes(ActorAttributes.dispatcher("akka.test.stream-dispatcher")) + .withAttributes(ActorAttributes.dispatcher("pekko.test.stream-dispatcher")) .take(elements) .runWith(Sink.asPublisher(false)) } diff --git 
a/akka-stream-tests/src/test/java/org/apache/pekko/stream/StreamAttributeDocTest.java b/akka-stream-tests/src/test/java/org/apache/pekko/stream/StreamAttributeDocTest.java index 3c0734db18..65e46c0d2e 100644 --- a/akka-stream-tests/src/test/java/org/apache/pekko/stream/StreamAttributeDocTest.java +++ b/akka-stream-tests/src/test/java/org/apache/pekko/stream/StreamAttributeDocTest.java @@ -31,7 +31,7 @@ public class StreamAttributeDocTest extends StreamTest { public static AkkaJUnitActorSystemResource actorSystemResource = new AkkaJUnitActorSystemResource( "StreamAttributeDocTest", - ConfigFactory.parseString("my-stream-dispatcher = akka.test.stream-dispatcher") + ConfigFactory.parseString("my-stream-dispatcher = pekko.test.stream-dispatcher") .withFallback(AkkaSpec.testConf())); @Test diff --git a/akka-stream-tests/src/test/resources/reference.conf b/akka-stream-tests/src/test/resources/reference.conf index 7b53530e4d..0a6c83f766 100644 --- a/akka-stream-tests/src/test/resources/reference.conf +++ b/akka-stream-tests/src/test/resources/reference.conf @@ -1,9 +1,9 @@ -akka { +pekko { loggers = ["org.apache.pekko.testkit.TestEventListener"] actor { default-dispatcher.throughput = 1 // Amplify the effects of fuzzing } - akka.actor.warn-about-java-serializer-usage = false + actor.warn-about-java-serializer-usage = false stream.materializer.debug.fuzzing-mode = on stream.secret-test-fuzzing-warning-disable = 42 diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/ActorMaterializerSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/ActorMaterializerSpec.scala index b7f6f8b850..823ddef4e1 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/ActorMaterializerSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/ActorMaterializerSpec.scala @@ -54,7 +54,7 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { implicit val deadlockSystem = ActorSystem(
"ActorMaterializerSpec-deadlock", ConfigFactory.parseString(s""" - akka.actor.default-dispatcher { + pekko.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-min = $n @@ -63,8 +63,8 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { } } # undo stream testkit specific dispatcher and run "normally" - akka.actor.default-mailbox.mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox" - akka.stream.materializer.dispatcher = "akka.actor.default-dispatcher" + pekko.actor.default-mailbox.mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox" + pekko.stream.materializer.dispatcher = "pekko.actor.default-dispatcher" """)) try { import deadlockSystem.dispatcher @@ -139,7 +139,7 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { "terminate if ActorContext it was created from terminates" in { val p = TestProbe() - val a = system.actorOf(Props(new ActorWithMaterializer(p)).withDispatcher("akka.test.stream-dispatcher")) + val a = system.actorOf(Props(new ActorWithMaterializer(p)).withDispatcher("pekko.test.stream-dispatcher")) p.expectMsg("hello") a ! 
PoisonPill @@ -161,7 +161,7 @@ object ActorMaterializerSpec { @nowarn("msg=deprecated") class ActorWithMaterializer(p: TestProbe) extends Actor { private val settings: ActorMaterializerSettings = - ActorMaterializerSettings(context.system).withDispatcher("akka.test.stream-dispatcher") + ActorMaterializerSettings(context.system).withDispatcher("pekko.test.stream-dispatcher") implicit val mat: ActorMaterializer = ActorMaterializer(settings)(context) Source diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/DslFactoriesConsistencySpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/DslFactoriesConsistencySpec.scala index facc9cecbf..a517ba9ce7 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/DslFactoriesConsistencySpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/DslFactoriesConsistencySpec.scala @@ -239,7 +239,7 @@ class DslFactoriesConsistencySpec extends AnyWordSpec with Matchers { } def returnTypeString(m: Method): String = - m.returnType.getName.drop("akka.stream.".length) + m.returnType.getName.drop("org.apache.pekko.stream.".length) case class Method(name: String, parameterTypes: List[Class[_]], returnType: Class[_], declaringClass: Class[_]) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamAttributeDocSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamAttributeDocSpec.scala index 417f7188c9..be68c24cbf 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamAttributeDocSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamAttributeDocSpec.scala @@ -15,7 +15,7 @@ import pekko.stream.scaladsl.Source import pekko.stream.scaladsl.TcpAttributes import pekko.stream.testkit.StreamSpec -class StreamAttributeDocSpec extends StreamSpec("my-stream-dispatcher = \"akka.test.stream-dispatcher\"") { +class StreamAttributeDocSpec extends StreamSpec("my-stream-dispatcher = \"pekko.test.stream-dispatcher\"") {
"Setting attributes on the runnable stream" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamDispatcherSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamDispatcherSpec.scala index 385329a985..9c17202809 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamDispatcherSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/StreamDispatcherSpec.scala @@ -24,7 +24,7 @@ class StreamDispatcherSpec extends StreamSpec { "The deprecated default stream io dispatcher" must { "be the same as the default blocking io dispatcher for actors" in { // in case it is still used - val streamIoDispatcher = system.dispatchers.lookup("akka.stream.default-blocking-io-dispatcher") + val streamIoDispatcher = system.dispatchers.lookup("pekko.stream.default-blocking-io-dispatcher") val actorIoDispatcher = system.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId) streamIoDispatcher shouldBe theSameInstanceAs(actorIoDispatcher) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/AsyncCallbackSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/AsyncCallbackSpec.scala index 7553ca08fc..6ef014eab5 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/AsyncCallbackSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/AsyncCallbackSpec.scala @@ -24,7 +24,7 @@ import pekko.testkit.AkkaSpec import pekko.testkit.TestProbe class AsyncCallbackSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = off + pekko.stream.materializer.debug.fuzzing-mode = off """) { case object Started diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/ChasingEventsSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/ChasingEventsSpec.scala index 24c0117d95..03f72801e2 100644 --- 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/ChasingEventsSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/impl/fusing/ChasingEventsSpec.scala @@ -18,7 +18,7 @@ import pekko.stream.testkit.Utils.TE import pekko.testkit.AkkaSpec class ChasingEventsSpec extends AkkaSpec(""" - akka.stream.materializer.debug.fuzzing-mode = off + pekko.stream.materializer.debug.fuzzing-mode = off """) { class CancelInChasedPull extends GraphStage[FlowShape[Int, Int]] { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/DeprecatedTlsSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/DeprecatedTlsSpec.scala index 4aa0cd0c52..bdcdb27570 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/DeprecatedTlsSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/DeprecatedTlsSpec.scala @@ -93,9 +93,9 @@ object DeprecatedTlsSpec { val configOverrides = """ - akka.loglevel = DEBUG # issue 21660 - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.debug.receive=off + pekko.loglevel = DEBUG # issue 21660 + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.debug.receive=off """ } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSinkSpec.scala index b64eec9574..e96d92e29a 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSinkSpec.scala @@ -29,7 +29,7 @@ import pekko.util.ByteString @nowarn class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures { - val settings = ActorMaterializerSettings(system).withDispatcher("akka.actor.default-dispatcher") + val settings = ActorMaterializerSettings(system).withDispatcher("pekko.actor.default-dispatcher") implicit val 
materializer: ActorMaterializer = ActorMaterializer(settings) val fs = Jimfs.newFileSystem("FileSinkSpec", Configuration.unix()) @@ -182,7 +182,8 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures "allow overriding the dispatcher using Attributes" in { targetFile { f => val forever = Source.maybe - .toMat(FileIO.toPath(f).addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher")))(Keep.left) + .toMat(FileIO.toPath(f).addAttributes(ActorAttributes.dispatcher("pekko.actor.default-dispatcher")))( + Keep.left) .run() try { materializer @@ -190,7 +191,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures .supervisor .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "fileSink").get - assertDispatcher(ref, "akka.actor.default-dispatcher") + assertDispatcher(ref, "pekko.actor.default-dispatcher") } finally { forever.complete(Success(None)) } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSourceSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSourceSpec.scala index 6f2fa963e8..d750918530 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/FileSourceSpec.scala @@ -34,7 +34,7 @@ object FileSourceSpec { @nowarn class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { - val settings = ActorMaterializerSettings(system).withDispatcher("akka.actor.default-dispatcher") + val settings = ActorMaterializerSettings(system).withDispatcher("pekko.actor.default-dispatcher") implicit val materializer: ActorMaterializer = ActorMaterializer(settings) val fs = Jimfs.newFileSystem("FileSourceSpec", Configuration.unix()) @@ -274,7 +274,7 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { try { val p = FileIO .fromPath(manyLines) - 
.addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher")) + .addAttributes(ActorAttributes.dispatcher("pekko.actor.default-dispatcher")) .runWith(TestSink.probe)(materializer) materializer @@ -282,7 +282,7 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { .supervisor .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "fileSource").get - try assertDispatcher(ref, "akka.actor.default-dispatcher") + try assertDispatcher(ref, "pekko.actor.default-dispatcher") finally p.cancel() } finally shutdown(sys) } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/InputStreamSourceSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/InputStreamSourceSpec.scala index a62d1cb8f4..b964a01b1d 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/InputStreamSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/InputStreamSourceSpec.scala @@ -22,7 +22,7 @@ import pekko.util.ByteString @nowarn class InputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { - val settings = ActorMaterializerSettings(system).withDispatcher("akka.actor.default-dispatcher") + val settings = ActorMaterializerSettings(system).withDispatcher("pekko.actor.default-dispatcher") implicit val materializer: ActorMaterializer = ActorMaterializer(settings) private def inputStreamFor(bytes: Array[Byte]): InputStream = diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/OutputStreamSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/OutputStreamSinkSpec.scala index 97e3e6a027..1fe1ffad32 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/OutputStreamSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/OutputStreamSinkSpec.scala @@ -23,7 +23,7 @@ import pekko.util.ByteString @nowarn class OutputStreamSinkSpec extends 
StreamSpec(UnboundedMailboxConfig) with ScalaFutures { - val settings = ActorMaterializerSettings(system).withDispatcher("akka.actor.default-dispatcher") + val settings = ActorMaterializerSettings(system).withDispatcher("pekko.actor.default-dispatcher") implicit val materializer: ActorMaterializer = ActorMaterializer(settings) "OutputStreamSink" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpHelper.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpHelper.scala index d4096c1cad..6c9dc3ad4f 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpHelper.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpHelper.scala @@ -34,9 +34,9 @@ object TcpHelper { case object WriteAck extends Tcp.Event def testClientProps(connection: ActorRef): Props = - Props(new TestClient(connection)).withDispatcher("akka.test.stream-dispatcher") + Props(new TestClient(connection)).withDispatcher("pekko.test.stream-dispatcher") def testServerProps(address: InetSocketAddress, probe: ActorRef): Props = - Props(new TestServer(address, probe)).withDispatcher("akka.test.stream-dispatcher") + Props(new TestServer(address, probe)).withDispatcher("pekko.test.stream-dispatcher") class TestClient(connection: ActorRef) extends Actor { connection ! 
Tcp.Register(self, keepOpenOnPeerClosed = true, useResumeWriting = false) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpSpec.scala index cbdbf4e8cd..3c93f6c479 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TcpSpec.scala @@ -91,12 +91,12 @@ class FailingDnsResolver extends DnsProvider { } class TcpSpec extends StreamSpec(""" - akka.loglevel = debug - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.io.tcp.trace-logging = true - akka.stream.materializer.subscription-timeout.timeout = 2s - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.loglevel = debug + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.io.tcp.trace-logging = true + pekko.stream.materializer.subscription-timeout.timeout = 2s + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) with TcpHelper with WithLogCapturing { "Outgoing TCP stream" must { @@ -542,7 +542,7 @@ class TcpSpec extends StreamSpec(""" val system2 = ActorSystem( "TcpSpec-unexpected-system2", ConfigFactory.parseString(""" - akka.loglevel = DEBUG # issue #21660 + pekko.loglevel = DEBUG # issue #21660 """).withFallback(system.settings.config)) try { @@ -592,7 +592,7 @@ class TcpSpec extends StreamSpec(""" val systemWithBrokenDns = ActorSystem( "TcpSpec-resolution-failure", ConfigFactory.parseString(""" - akka.io.dns.inet-address.provider-object = org.apache.pekko.stream.io.FailingDnsResolver + pekko.io.dns.inet-address.provider-object = org.apache.pekko.stream.io.FailingDnsResolver """).withFallback(system.settings.config)) try { val unknownHostName = "abcdefghijklmnopkuh" @@ -703,10 +703,10 @@ class TcpSpec extends StreamSpec(""" // 
configure a few timeouts we do not want to hit val config = ConfigFactory.parseString(""" - akka.actor.serializer-messages = off - akka.io.tcp.register-timeout = 42s - akka.stream.materializer.subscription-timeout.mode = cancel - akka.stream.materializer.subscription-timeout.timeout = 42s + pekko.actor.serializer-messages = off + pekko.io.tcp.register-timeout = 42s + pekko.stream.materializer.subscription-timeout.mode = cancel + pekko.stream.materializer.subscription-timeout.timeout = 42s """) val serverSystem = ActorSystem("server", config) val clientSystem = ActorSystem("client", config) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TlsSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TlsSpec.scala index a39d159ed5..67da6daca5 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TlsSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/io/TlsSpec.scala @@ -92,9 +92,9 @@ object TlsSpec { val configOverrides = """ - akka.loglevel = DEBUG # issue 21660 - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] - akka.actor.debug.receive=off + pekko.loglevel = DEBUG # issue 21660 + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] + pekko.actor.debug.receive=off """ } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefBackpressureSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefBackpressureSinkSpec.scala index 998e352280..9c69cd3382 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefBackpressureSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefBackpressureSinkSpec.scala @@ -54,7 +54,7 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { import ActorRefBackpressureSinkSpec._ def createActor[T](c: Class[T]) = - system.actorOf(Props(c, 
testActor).withDispatcher("akka.test.stream-dispatcher")) + system.actorOf(Props(c, testActor).withDispatcher("pekko.test.stream-dispatcher")) "An ActorRefBackpressureSink" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefSinkSpec.scala index d2d4f2107b..d20390eff1 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/ActorRefSinkSpec.scala @@ -36,7 +36,7 @@ class ActorRefSinkSpec extends StreamSpec { } "cancel stream when actor terminates" in { - val fw = system.actorOf(Props(classOf[Fw], testActor).withDispatcher("akka.test.stream-dispatcher")) + val fw = system.actorOf(Props(classOf[Fw], testActor).withDispatcher("pekko.test.stream-dispatcher")) val publisher = TestSource .probe[Int] diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AggregateWithBoundarySpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AggregateWithBoundarySpec.scala index 3da6c23e2a..9ce391e7bb 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AggregateWithBoundarySpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AggregateWithBoundarySpec.scala @@ -77,7 +77,7 @@ class AggregateWithTimeBoundaryAndSimulatedTimeSpec extends AnyWordSpecLike with s"ActorSystemWithExplicitlyTriggeredScheduler-$id", ConfigFactory.load( AkkaSpec.testConf.withValue( - "akka.scheduler.implementation", + "pekko.scheduler.implementation", ConfigValueFactory.fromAnyRef("org.apache.pekko.testkit.ExplicitlyTriggeredScheduler")))) private def getEts(actor: ActorSystem): ExplicitlyTriggeredScheduler = { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AttributesSpec.scala 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AttributesSpec.scala index 2370696c36..06d77651ef 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AttributesSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/AttributesSpec.scala @@ -673,7 +673,7 @@ class AttributesSpec .futureValue // should not override stage specific dispatcher - dispatcher should startWith("AttributesSpec-akka.actor.default-blocking-io-dispatcher") + dispatcher should startWith("AttributesSpec-pekko.actor.default-blocking-io-dispatcher") } finally { myDispatcherMaterializer.shutdown() @@ -696,8 +696,8 @@ class AttributesSpec throughput = 1 } my-io-dispatcher = $${my-dispatcher} - akka.stream.materializer.dispatcher = "my-dispatcher" - akka.stream.materializer.blocking-io-dispatcher = "my-io-dispatcher" + pekko.stream.materializer.dispatcher = "my-dispatcher" + pekko.stream.materializer.blocking-io-dispatcher = "my-io-dispatcher" """) // we need to revert to the regular mailbox or else the test suite will complain // about using non-test worthy dispatchers @@ -726,7 +726,7 @@ class AttributesSpec val threadName = Source.fromGraph(new ThreadNameSnitchingStage(None).addAttributes(Attributes(IODispatcher))).runWith(Sink.head) - threadName.futureValue should startWith("AttributesSpec-akka.actor.default-blocking-io-dispatcher") + threadName.futureValue should startWith("AttributesSpec-pekko.actor.default-blocking-io-dispatcher") } "allow for specifying a custom default io-dispatcher" in { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/BoundedSourceQueueSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/BoundedSourceQueueSpec.scala index f31f7bcef9..b83cc22f25 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/BoundedSourceQueueSpec.scala +++ 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/BoundedSourceQueueSpec.scala @@ -15,8 +15,8 @@ import pekko.stream.testkit.{ StreamSpec, TestSubscriber } import pekko.stream.testkit.scaladsl.TestSink import pekko.testkit.WithLogCapturing -class BoundedSourceQueueSpec extends StreamSpec("""akka.loglevel = debug - |akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] +class BoundedSourceQueueSpec extends StreamSpec("""pekko.loglevel = debug + |pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"] |""".stripMargin) with WithLogCapturing { override implicit def patienceConfig: PatienceConfig = PatienceConfig(5.seconds) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CancellationStrategySpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CancellationStrategySpec.scala index 3b54bb24ae..3d0adea7e4 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CancellationStrategySpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CancellationStrategySpec.scala @@ -32,8 +32,8 @@ import pekko.stream.testkit.Utils.TE import pekko.testkit._ import pekko.testkit.WithLogCapturing -class CancellationStrategySpec extends StreamSpec("""akka.loglevel = DEBUG - akka.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"]""") with WithLogCapturing { +class CancellationStrategySpec extends StreamSpec("""pekko.loglevel = DEBUG + pekko.loggers = ["org.apache.pekko.testkit.SilenceAllTestEventListener"]""") with WithLogCapturing { "CancellationStrategyAttribute" should { "support strategies" should { "CompleteStage" should { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CollectionSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CollectionSinkSpec.scala index 002b88cb15..e34ed49d04 100644 --- 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CollectionSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CollectionSinkSpec.scala @@ -15,7 +15,7 @@ import pekko.stream.testkit.StreamSpec import pekko.stream.testkit.TestPublisher class CollectionSinkSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "Sink.collection" when { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CoupledTerminationFlowSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CoupledTerminationFlowSpec.scala index a82acc9ccd..953399a239 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CoupledTerminationFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/CoupledTerminationFlowSpec.scala @@ -24,7 +24,7 @@ import pekko.stream.testkit.scaladsl.TestSource import pekko.testkit.TestProbe class CoupledTerminationFlowSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { import system.dispatcher diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAlsoToAllSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAlsoToAllSpec.scala index 52aa6fd49c..01eb0409c4 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAlsoToAllSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAlsoToAllSpec.scala @@ -9,7 +9,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl.TestSink class FlowAlsoToAllSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "An also to all" must { diff --git 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAskSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAskSpec.scala index 3a1e9790df..1149fbf45a 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAskSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowAskSpec.scala @@ -85,25 +85,26 @@ class FlowAskSpec extends StreamSpec { implicit val timeout: Timeout = pekko.util.Timeout(10.seconds) val replyOnInts = - system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "replyOnInts") + system.actorOf(Props(classOf[Replier]).withDispatcher("pekko.test.stream-dispatcher"), "replyOnInts") - val dontReply = system.actorOf(TestActors.blackholeProps.withDispatcher("akka.test.stream-dispatcher"), "dontReply") + val dontReply = + system.actorOf(TestActors.blackholeProps.withDispatcher("pekko.test.stream-dispatcher"), "dontReply") val replyRandomDelays = system.actorOf( - Props(classOf[RandomDelaysReplier]).withDispatcher("akka.test.stream-dispatcher"), + Props(classOf[RandomDelaysReplier]).withDispatcher("pekko.test.stream-dispatcher"), "replyRandomDelays") val statusReplier = - system.actorOf(Props(new StatusReplier).withDispatcher("akka.test.stream-dispatcher"), "statusReplier") + system.actorOf(Props(new StatusReplier).withDispatcher("pekko.test.stream-dispatcher"), "statusReplier") def replierFailOn(n: Int) = - system.actorOf(Props(new FailOn(n)).withDispatcher("akka.test.stream-dispatcher"), s"failureReplier-$n") + system.actorOf(Props(new FailOn(n)).withDispatcher("pekko.test.stream-dispatcher"), s"failureReplier-$n") val failsOn1 = replierFailOn(1) val failsOn3 = replierFailOn(3) def replierFailAllExceptOn(n: Int) = - system.actorOf(Props(new FailOnAllExcept(n)).withDispatcher("akka.test.stream-dispatcher"), s"failureReplier-$n") + system.actorOf(Props(new FailOnAllExcept(n)).withDispatcher("pekko.test.stream-dispatcher"), 
s"failureReplier-$n") val failAllExcept6 = replierFailAllExceptOn(6) "produce asked elements" in { @@ -173,7 +174,7 @@ class FlowAskSpec extends StreamSpec { } "signal failure when target actor is terminated" in { - val r = system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "wanna-fail") + val r = system.actorOf(Props(classOf[Replier]).withDispatcher("pekko.test.stream-dispatcher"), "wanna-fail") val done = Source.maybe[Int].ask[Reply](4)(r).runWith(Sink.ignore) intercept[RuntimeException] { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchSpec.scala index b31b1d3ea1..bf37ce1416 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchSpec.scala @@ -14,8 +14,8 @@ import pekko.stream.OverflowStrategy import pekko.stream.testkit._ class FlowBatchSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "Batch" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchWeightedSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchWeightedSpec.scala index 60a8959560..c30e002552 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBatchWeightedSpec.scala @@ -9,8 +9,8 @@ import scala.concurrent.duration._ import org.apache.pekko.stream.testkit._ class FlowBatchWeightedSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size 
= 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "BatchWeighted" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBufferSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBufferSpec.scala index 100e1decb2..c9fcf3f961 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBufferSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowBufferSpec.scala @@ -17,8 +17,8 @@ import pekko.stream.testkit.scaladsl._ @nowarn("msg=deprecated") class FlowBufferSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 1 - akka.stream.materializer.max-input-buffer-size = 1 + pekko.stream.materializer.initial-input-buffer-size = 1 + pekko.stream.materializer.max-input-buffer-size = 1 """) { "Buffer" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllLazySpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllLazySpec.scala index b14790f051..67a75a4497 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllLazySpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllLazySpec.scala @@ -13,8 +13,8 @@ import java.util.concurrent.atomic.AtomicBoolean import scala.util.control.NoStackTrace class FlowConcatAllLazySpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "ConcatAllLazy" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllSpec.scala index 
d165b29ecb..f43d774d40 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConcatAllSpec.scala @@ -12,8 +12,8 @@ import pekko.stream.testkit._ import pekko.util.ConstantFun class FlowConcatAllSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "ConcatAll" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConflateSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConflateSpec.scala index f0fc7a1ca1..cad22e4392 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConflateSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowConflateSpec.scala @@ -20,8 +20,8 @@ import pekko.stream.testkit.Utils.TE import pekko.testkit.TestLatch class FlowConflateSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "Conflate" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDispatcherSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDispatcherSpec.scala index c2d00fa19e..3445774c1c 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDispatcherSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDispatcherSpec.scala @@ -13,13 +13,13 @@ import pekko.stream.testkit.StreamSpec import pekko.testkit.TestProbe @nowarn("msg=deprecated") -class FlowDispatcherSpec extends StreamSpec(s"my-dispatcher = 
$${akka.test.stream-dispatcher}") { +class FlowDispatcherSpec extends StreamSpec(s"my-dispatcher = $${pekko.test.stream-dispatcher}") { val defaultSettings = ActorMaterializerSettings(system) def testDispatcher( settings: ActorMaterializerSettings = defaultSettings, - dispatcher: String = "akka.test.stream-dispatcher") = { + dispatcher: String = "pekko.test.stream-dispatcher") = { implicit val materializer: ActorMaterializer = ActorMaterializer(settings) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDropSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDropSpec.scala index 686e7bc7bb..69bfa735dc 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDropSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowDropSpec.scala @@ -9,7 +9,7 @@ import java.util.concurrent.ThreadLocalRandom.{ current => random } import org.apache.pekko.stream.testkit._ class FlowDropSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A Drop" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExpandSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExpandSpec.scala index 0787f98436..2864b42c19 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExpandSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExpandSpec.scala @@ -16,8 +16,8 @@ import pekko.stream.testkit.scaladsl.TestSink import pekko.stream.testkit.scaladsl.TestSource class FlowExpandSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "Expand" must { 
diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExtrapolateSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExtrapolateSpec.scala index 633eb63049..6c44797dba 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExtrapolateSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowExtrapolateSpec.scala @@ -15,10 +15,10 @@ import pekko.stream.testkit.scaladsl.TestSink import pekko.stream.testkit.scaladsl.TestSource class FlowExtrapolateSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 # see the ordering guarantee needed by the for loop below - akka.stream.materializer.debug.fuzzing-mode = off + pekko.stream.materializer.debug.fuzzing-mode = off """) { "Extrapolate" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFilterSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFilterSpec.scala index 7106407afb..ba3bc51eda 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFilterSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFilterSpec.scala @@ -18,7 +18,7 @@ import pekko.stream.testkit.scaladsl.TestSink import pekko.stream.testkit.scaladsl.TestSource class FlowFilterSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A Filter" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFlatMapPrefixSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFlatMapPrefixSpec.scala index 50220965ec..ecc782c4b8 100644 --- 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFlatMapPrefixSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowFlatMapPrefixSpec.scala @@ -23,7 +23,7 @@ import pekko.stream.testkit.{ StreamSpec, TestPublisher, TestSubscriber } import pekko.stream.testkit.Utils.TE // Debug loglevel to diagnose https://github.com/akka/akka/issues/30469 -class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { +class FlowFlatMapPrefixSpec extends StreamSpec("pekko.loglevel = debug") { def src10(i: Int = 0) = Source(i until (i + 10)) for { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupBySpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupBySpec.scala index ed3c9f1198..0c55199ab8 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupBySpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupBySpec.scala @@ -42,8 +42,8 @@ object FlowGroupBySpec { } class FlowGroupBySpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { import FlowGroupBySpec._ diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedSpec.scala index cfa9b314d2..4bed9e0f20 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedSpec.scala @@ -13,7 +13,7 @@ import pekko.stream.testkit.ScriptedTest import pekko.stream.testkit.StreamSpec class FlowGroupedSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + 
pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A Grouped" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedWeightedSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedWeightedSpec.scala index 146daf01b0..90791e7b2b 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowGroupedWeightedSpec.scala @@ -14,7 +14,7 @@ import pekko.testkit.TimingTest import pekko.util.unused class FlowGroupedWeightedSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A GroupedWeighted" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIdleInjectSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIdleInjectSpec.scala index 88accd87a9..8aa0a1f193 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIdleInjectSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIdleInjectSpec.scala @@ -13,7 +13,7 @@ import pekko.stream.testkit.TestPublisher import pekko.stream.testkit.TestSubscriber class FlowIdleInjectSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "keepAlive" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInitialDelaySpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInitialDelaySpec.scala index 318f8144c9..7c00baaf00 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInitialDelaySpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInitialDelaySpec.scala @@ 
-14,7 +14,7 @@ import pekko.stream.testkit.StreamSpec import pekko.stream.testkit.TestSubscriber class FlowInitialDelaySpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "Flow initialDelay" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInterleaveAllSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInterleaveAllSpec.scala index 3dd31efbb9..efd9e4347c 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInterleaveAllSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowInterleaveAllSpec.scala @@ -12,8 +12,8 @@ import pekko.stream.testkit.scaladsl.TestSink import java.util.StringJoiner class FlowInterleaveAllSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "An InterleaveAll for Flow " must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIntersperseSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIntersperseSpec.scala index e8c8fd9785..fa2b496668 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIntersperseSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIntersperseSpec.scala @@ -12,7 +12,7 @@ import pekko.stream.testkit.scaladsl.TestSink import pekko.stream.testkit.scaladsl.TestSource class FlowIntersperseSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A Intersperse" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIteratorSpec.scala 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIteratorSpec.scala index 5d9b09398a..eeace5510d 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIteratorSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowIteratorSpec.scala @@ -67,8 +67,8 @@ class FlowIterableSpec extends AbstractFlowIteratorSpec { } abstract class AbstractFlowIteratorSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { def testName: String diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowJoinSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowJoinSpec.scala index fb2f3c36e9..f78e070dee 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowJoinSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowJoinSpec.scala @@ -15,7 +15,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl._ class FlowJoinSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { implicit val defaultPatience: PatienceConfig = diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitSpec.scala index c4c010d8e1..4048166ff1 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitSpec.scala @@ -11,7 +11,7 @@ import pekko.stream.StreamLimitReachedException import pekko.stream.testkit.StreamSpec class FlowLimitSpec extends StreamSpec(""" - 
akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "Limit" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitWeightedSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitWeightedSpec.scala index ee18ccec21..44cd8df9cb 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLimitWeightedSpec.scala @@ -12,7 +12,7 @@ import pekko.stream.testkit.StreamSpec import pekko.util.unused class FlowLimitWeightedSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "Limit" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogSpec.scala index be5bc3ea13..96d0abf295 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogSpec.scala @@ -19,7 +19,7 @@ import pekko.stream.testkit.{ ScriptedTest, StreamSpec } import pekko.testkit.TestProbe class FlowLogSpec extends StreamSpec(""" - akka.loglevel = DEBUG # test verifies logging + pekko.loglevel = DEBUG # test verifies logging """) with ScriptedTest { val logProbe = { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogWithMarkerSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogWithMarkerSpec.scala index 88238c5892..ab28c10843 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogWithMarkerSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowLogWithMarkerSpec.scala @@ -19,7 +19,7 @@ import 
pekko.stream.testkit.{ ScriptedTest, StreamSpec } import pekko.testkit.TestProbe class FlowLogWithMarkerSpec extends StreamSpec(""" - akka.loglevel = DEBUG # test verifies logging + pekko.loglevel = DEBUG # test verifies logging """) with ScriptedTest { val logProbe = { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapConcatSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapConcatSpec.scala index f2336e6f6b..f6ba805c28 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapConcatSpec.scala @@ -13,7 +13,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl.TestSink class FlowMapConcatSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A MapConcat" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapErrorSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapErrorSpec.scala index 8510186174..3ff5f7d38e 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapErrorSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapErrorSpec.scala @@ -11,8 +11,8 @@ import pekko.stream.testkit.StreamSpec import pekko.stream.testkit.scaladsl.TestSink class FlowMapErrorSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 1 - akka.stream.materializer.max-input-buffer-size = 1 + pekko.stream.materializer.initial-input-buffer-size = 1 + pekko.stream.materializer.max-input-buffer-size = 1 """) { val ex = new RuntimeException("ex") with NoStackTrace diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapSpec.scala 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapSpec.scala index ab6670680d..770b8b1090 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMapSpec.scala @@ -9,7 +9,7 @@ import java.util.concurrent.ThreadLocalRandom.{ current => random } import org.apache.pekko.stream.testkit._ class FlowMapSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A Map" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMergeAllSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMergeAllSpec.scala index 4fd74d803c..c181612b01 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMergeAllSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowMergeAllSpec.scala @@ -9,7 +9,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl.TestSink class FlowMergeAllSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "Flow mergeAll" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowOnCompleteSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowOnCompleteSpec.scala index 4afbb3bb05..ca13230d03 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowOnCompleteSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowOnCompleteSpec.scala @@ -16,7 +16,7 @@ import pekko.stream.testkit._ import pekko.testkit.TestProbe class FlowOnCompleteSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + 
pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A Flow with onComplete" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowPrefixAndTailSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowPrefixAndTailSpec.scala index f22aeae3db..099d13b2ec 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowPrefixAndTailSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowPrefixAndTailSpec.scala @@ -15,8 +15,8 @@ import pekko.stream._ import pekko.stream.testkit._ class FlowPrefixAndTailSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { "PrefixAndTail" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowRecoverSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowRecoverSpec.scala index 0ae0eb7cb7..7d0a148e62 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowRecoverSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowRecoverSpec.scala @@ -12,8 +12,8 @@ import pekko.stream.testkit.scaladsl.TestSink import pekko.testkit.EventFilter class FlowRecoverSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 1 - akka.stream.materializer.max-input-buffer-size = 1 + pekko.stream.materializer.initial-input-buffer-size = 1 + pekko.stream.materializer.max-input-buffer-size = 1 """) { val ex = new RuntimeException("ex") with NoStackTrace diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowScanSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowScanSpec.scala index 66f32361ab..869ceeff88 100644 --- 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowScanSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowScanSpec.scala @@ -19,7 +19,7 @@ import pekko.stream.testkit.Utils._ import pekko.stream.testkit.scaladsl.TestSink class FlowScanSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A Scan" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSectionSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSectionSpec.scala index 2dbd531fa9..811743fc9d 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSectionSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSectionSpec.scala @@ -14,8 +14,8 @@ import pekko.testkit.TestProbe object FlowSectionSpec { val config = s""" - my-dispatcher1 = $${akka.test.stream-dispatcher} - my-dispatcher2 = $${akka.test.stream-dispatcher} + my-dispatcher1 = $${pekko.test.stream-dispatcher} + my-dispatcher2 = $${pekko.test.stream-dispatcher} """ } @@ -79,7 +79,7 @@ class FlowSectionSpec extends StreamSpec(FlowSectionSpec.config) { Source(0 to 2).via(f1).via(f2).runWith(Sink.ignore) defaultDispatcher.receiveN(3).foreach { - case s: String => s should include("akka.test.stream-dispatcher") + case s: String => s should include("pekko.test.stream-dispatcher") case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSpec.scala index abe3a4eeb8..cb423d6158 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSpec.scala @@ -36,7 +36,7 @@ object FlowSpec { } @nowarn 
// tests type assignments compile -class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.receive=off\nakka.loglevel=INFO")) { +class FlowSpec extends StreamSpec(ConfigFactory.parseString("pekko.actor.debug.receive=off\npekko.loglevel=INFO")) { import FlowSpec._ val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitAfterSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitAfterSpec.scala index e8b1032020..d2650a9a0c 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitAfterSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitAfterSpec.scala @@ -29,10 +29,10 @@ object FlowSplitAfterSpec { } class FlowSplitAfterSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 - akka.stream.materializer.subscription-timeout.timeout = 1s - akka.stream.materializer.subscription-timeout.mode = cancel + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.subscription-timeout.timeout = 1s + pekko.stream.materializer.subscription-timeout.mode = cancel """) { import FlowSplitAfterSpec._ diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitWhenSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitWhenSpec.scala index 3c9cdd662f..80d5a515e3 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitWhenSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowSplitWhenSpec.scala @@ -22,10 +22,10 @@ import pekko.stream.testkit.Utils._ import pekko.stream.testkit.scaladsl.TestSink class FlowSplitWhenSpec extends StreamSpec("""
akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 - akka.stream.materializer.subscription-timeout.timeout = 1s - akka.stream.materializer.subscription-timeout.mode = cancel + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.subscription-timeout.timeout = 1s + pekko.stream.materializer.subscription-timeout.mode = cancel """) { import FlowSplitAfterSpec._ diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowStatefulMapConcatSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowStatefulMapConcatSpec.scala index 28ba86c5d4..97163108a5 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowStatefulMapConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowStatefulMapConcatSpec.scala @@ -13,7 +13,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl.TestSink class FlowStatefulMapConcatSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { val ex = new Exception("TEST") with NoStackTrace diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowTakeSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowTakeSpec.scala index 641de1fec8..1bfa5f874a 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowTakeSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowTakeSpec.scala @@ -16,7 +16,7 @@ import pekko.stream.impl.RequestMore import pekko.stream.testkit._ class FlowTakeSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { muteDeadLetters(classOf[OnNext], 
OnComplete.getClass, classOf[RequestMore[_]])() diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowThrottleSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowThrottleSpec.scala index e34c2d7b5d..a414e2688a 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowThrottleSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowThrottleSpec.scala @@ -23,8 +23,8 @@ import pekko.testkit.TimingTest import pekko.util.ByteString class FlowThrottleSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { def genByteString(length: Int) = diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWatchSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWatchSpec.scala index 68462b0cb4..c8c5a0481c 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWatchSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWatchSpec.scala @@ -31,9 +31,10 @@ class FlowWatchSpec extends StreamSpec { "A Flow with watch" must { val replyOnInts = - system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "replyOnInts") + system.actorOf(Props(classOf[Replier]).withDispatcher("pekko.test.stream-dispatcher"), "replyOnInts") - val dontReply = system.actorOf(TestActors.blackholeProps.withDispatcher("akka.test.stream-dispatcher"), "dontReply") + val dontReply = + system.actorOf(TestActors.blackholeProps.withDispatcher("pekko.test.stream-dispatcher"), "dontReply") "pass through elements while actor is alive" in { val c = TestSubscriber.manualProbe[Int]() @@ -49,7 +50,7 @@ class FlowWatchSpec extends StreamSpec { } "signal failure when target 
actor is terminated" in { - val r = system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "wanna-fail") + val r = system.actorOf(Props(classOf[Replier]).withDispatcher("pekko.test.stream-dispatcher"), "wanna-fail") val done = Source.maybe[Int].watch(r).runWith(Sink.ignore) intercept[RuntimeException] { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWireTapSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWireTapSpec.scala index e944d041a3..895b1dde96 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWireTapSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWireTapSpec.scala @@ -11,7 +11,7 @@ import pekko.Done import pekko.stream.testkit._ import pekko.stream.testkit.Utils._ -class FlowWireTapSpec extends StreamSpec("akka.stream.materializer.debug.fuzzing-mode = off") { +class FlowWireTapSpec extends StreamSpec("pekko.stream.materializer.debug.fuzzing-mode = off") { import system.dispatcher diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextLogSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextLogSpec.scala index f5ae5dbbfe..653dd9f1fc 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextLogSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextLogSpec.scala @@ -13,7 +13,7 @@ import pekko.stream.testkit.StreamSpec import pekko.testkit.TestProbe class FlowWithContextLogSpec extends StreamSpec(""" - akka.loglevel = DEBUG # test verifies logging + pekko.loglevel = DEBUG # test verifies logging """) with ScriptedTest { val logProbe = { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextThrottleSpec.scala 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextThrottleSpec.scala index dae966a2a2..df7f0db302 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextThrottleSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/FlowWithContextThrottleSpec.scala @@ -12,8 +12,8 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl.TestSink class FlowWithContextThrottleSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 - akka.stream.materializer.max-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.max-input-buffer-size = 2 """) { private def toMessage(i: Int) = Message(s"data-$i", i.toLong) diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBalanceSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBalanceSpec.scala index 379852a48d..da136d29a7 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBalanceSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBalanceSpec.scala @@ -14,7 +14,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl._ class GraphBalanceSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A balance" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBroadcastSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBroadcastSpec.scala index 5121060935..f456613a60 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBroadcastSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphBroadcastSpec.scala @@ -15,7 +15,7 @@ import pekko.stream.testkit.scaladsl.TestSink import 
pekko.stream.testkit.scaladsl.TestSource class GraphBroadcastSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A broadcast" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphOpsIntegrationSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphOpsIntegrationSpec.scala index 6c14613d24..8a01e44384 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphOpsIntegrationSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphOpsIntegrationSpec.scala @@ -41,7 +41,7 @@ object GraphOpsIntegrationSpec { } class GraphOpsIntegrationSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { import pekko.stream.scaladsl.GraphOpsIntegrationSpec._ diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartialSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartialSpec.scala index 17152a70b5..54fd3fa054 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartialSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartialSpec.scala @@ -13,7 +13,7 @@ import pekko.stream.FlowShape import pekko.stream.testkit.StreamSpec class GraphPartialSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "GraphDSL.partial" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartitionSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartitionSpec.scala index aec94d17cf..ef01f0106a 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartitionSpec.scala +++ 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphPartitionSpec.scala @@ -16,7 +16,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.Utils.TE class GraphPartitionSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A partition" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipSpec.scala index d43e1b87a9..5f1c8cb96d 100755 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipSpec.scala @@ -12,7 +12,7 @@ import pekko.stream.OverflowStrategy import pekko.stream.testkit._ class GraphUnzipSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A unzip" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipWithSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipWithSpec.scala index 7ebe447cb4..8f24b18e06 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphUnzipWithSpec.scala @@ -22,7 +22,7 @@ import pekko.testkit.TestProbe import pekko.util.unused class GraphUnzipWithSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { import GraphDSL.Implicits._ diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphWireTapSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphWireTapSpec.scala index fc83ca6ed7..98045440f6 100755 --- 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphWireTapSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/GraphWireTapSpec.scala @@ -9,7 +9,7 @@ import pekko.stream.testkit._ import pekko.stream.testkit.scaladsl.TestSink class GraphWireTapSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A wire tap" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/HeadSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/HeadSinkSpec.scala index 8a083af819..61a673301e 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/HeadSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/HeadSinkSpec.scala @@ -14,7 +14,7 @@ import pekko.stream.Materializer import pekko.stream.testkit._ class HeadSinkSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) with ScriptedTest { "A Flow with Sink.head" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazyFlowSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazyFlowSpec.scala index f2c41d1b5a..ad7d1fad02 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazyFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazyFlowSpec.scala @@ -24,8 +24,8 @@ import pekko.testkit.TestProbe @nowarn("msg=deprecated") // tests deprecated API as well class LazyFlowSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 1 - akka.stream.materializer.max-input-buffer-size = 1 + pekko.stream.materializer.initial-input-buffer-size = 1 + pekko.stream.materializer.max-input-buffer-size = 1 """) { import system.dispatcher diff --git 
a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazySinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazySinkSpec.scala index 6b5dd60db4..c31b77d0aa 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazySinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/LazySinkSpec.scala @@ -28,8 +28,8 @@ import pekko.stream.testkit.scaladsl.TestSink @nowarn("msg=deprecated") class LazySinkSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 1 - akka.stream.materializer.max-input-buffer-size = 1 + pekko.stream.materializer.initial-input-buffer-size = 1 + pekko.stream.materializer.max-input-buffer-size = 1 """) { import system.dispatcher diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RestartSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RestartSpec.scala index ff4fddcf88..62c4eab571 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RestartSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RestartSpec.scala @@ -37,7 +37,7 @@ import pekko.testkit.TestProbe import pekko.testkit.TimingTest class RestartSpec - extends StreamSpec(Map("akka.test.single-expect-default" -> "10s", "akka.loglevel" -> "INFO")) + extends StreamSpec(Map("pekko.test.single-expect-default" -> "10s", "pekko.loglevel" -> "INFO")) with DefaultTimeout { import system.dispatcher diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RetryFlowSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RetryFlowSpec.scala index ff39eef1b6..1e416f7026 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RetryFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/RetryFlowSpec.scala @@ -16,8 +16,8 @@ import pekko.stream.testkit.{ StreamSpec, 
TestPublisher, TestSubscriber } import pekko.stream.testkit.scaladsl.{ TestSink, TestSource } class RetryFlowSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 1 - akka.stream.materializer.max-input-buffer-size = 1 + pekko.stream.materializer.initial-input-buffer-size = 1 + pekko.stream.materializer.max-input-buffer-size = 1 """) with CustomMatchers { final val Failed = new Exception("prepared failure") diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SeqSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SeqSinkSpec.scala index 5ab6534ac7..c5d3eaa4be 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SeqSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SeqSinkSpec.scala @@ -15,7 +15,7 @@ import pekko.stream.testkit.StreamSpec import pekko.stream.testkit.TestPublisher class SeqSinkSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "Sink.toSeq" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SinkAsJavaStreamSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SinkAsJavaStreamSpec.scala index 270fafe147..bee088d3b7 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SinkAsJavaStreamSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SinkAsJavaStreamSpec.scala @@ -51,14 +51,14 @@ class SinkAsJavaStreamSpec extends StreamSpec(UnboundedMailboxConfig) { "allow overriding the dispatcher using Attributes" in { val probe = TestSource .probe[ByteString] - .to(StreamConverters.asJavaStream().addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher"))) + .to(StreamConverters.asJavaStream().addAttributes(ActorAttributes.dispatcher("pekko.actor.default-dispatcher"))) .run() 
SystemMaterializer(system).materializer .asInstanceOf[PhasedFusingActorMaterializer] .supervisor .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "asJavaStream").get - assertDispatcher(ref, "akka.actor.default-dispatcher") + assertDispatcher(ref, "pekko.actor.default-dispatcher") probe.sendComplete() } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamConvertersSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamConvertersSpec.scala index 090160b88a..875067bb09 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamConvertersSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamConvertersSpec.scala @@ -320,7 +320,7 @@ class StreamConvertersSpec extends StreamSpec with DefaultTimeout { "produce a single value" in { val source = Source.single(ByteString("ASDF")) val sink = - StreamConverters.asInputStream().withAttributes(ActorAttributes.dispatcher("akka.test.stream-dispatcher")) + StreamConverters.asInputStream().withAttributes(ActorAttributes.dispatcher("pekko.test.stream-dispatcher")) val is = source.runWith(sink) is.read() should be('A') @@ -335,7 +335,7 @@ class StreamConvertersSpec extends StreamSpec with DefaultTimeout { "withstand being closed twice" in { val source = Source.single(ByteString("ASDF")) val sink = - StreamConverters.asInputStream().withAttributes(ActorAttributes.dispatcher("akka.test.stream-dispatcher")) + StreamConverters.asInputStream().withAttributes(ActorAttributes.dispatcher("pekko.test.stream-dispatcher")) val is = source.runWith(sink) is.read() should be('A') @@ -347,7 +347,7 @@ class StreamConvertersSpec extends StreamSpec with DefaultTimeout { "OutputStream Source" must { "ignore empty arrays" in { val source = - StreamConverters.asOutputStream().withAttributes(ActorAttributes.dispatcher("akka.test.stream-dispatcher")) + 
StreamConverters.asOutputStream().withAttributes(ActorAttributes.dispatcher("pekko.test.stream-dispatcher")) val (out, result) = source.toMat(Sink.seq)(Keep.both).run() diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamRefsSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamRefsSpec.scala index 542f5b72c9..7617653718 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamRefsSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/StreamRefsSpec.scala @@ -28,7 +28,7 @@ import pekko.util.ByteString object StreamRefsSpec { object DataSourceActor { - def props(): Props = Props(new DataSourceActor()).withDispatcher("akka.test.stream-dispatcher") + def props(): Props = Props(new DataSourceActor()).withDispatcher("pekko.test.stream-dispatcher") } case class Command(cmd: String, probe: ActorRef) @@ -170,7 +170,7 @@ object StreamRefsSpec { def config(): Config = { ConfigFactory.parseString(s""" - akka { + pekko { loglevel = DEBUG actor { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubscriberSinkSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubscriberSinkSpec.scala index 858f306228..2884cfc5a2 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubscriberSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubscriberSinkSpec.scala @@ -7,7 +7,7 @@ package org.apache.pekko.stream.scaladsl import org.apache.pekko.stream.testkit._ class SubscriberSinkSpec extends StreamSpec(""" - akka.stream.materializer.initial-input-buffer-size = 2 + pekko.stream.materializer.initial-input-buffer-size = 2 """) { "A Flow with SubscriberSink" must { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala 
b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala index a15b6934c4..a08732eb7c 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala @@ -11,7 +11,7 @@ import pekko.stream.impl.SubscriptionTimeoutException import pekko.stream.testkit._ class SubstreamSubscriptionTimeoutSpec extends StreamSpec(""" - akka.stream.materializer { + pekko.stream.materializer { initial-input-buffer-size = 2 max-input-buffer-size = 2 subscription-timeout { diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/TestConfig.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/TestConfig.scala index 2d316cf8e4..745307b7ba 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/TestConfig.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/scaladsl/TestConfig.scala @@ -5,6 +5,6 @@ package org.apache.pekko.stream.scaladsl object TestConfig { - val numberOfTestsToRun = System.getProperty("akka.stream.test.numberOfRandomizedTests", "10").toInt + val numberOfTestsToRun = System.getProperty("pekko.stream.test.numberOfRandomizedTests", "10").toInt val RandomTestRange = 1 to numberOfTestsToRun } diff --git a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/snapshot/MaterializerStateSpec.scala b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/snapshot/MaterializerStateSpec.scala index fe3bfd15aa..d1647fdf5f 100644 --- a/akka-stream-tests/src/test/scala/org/apache/pekko/stream/snapshot/MaterializerStateSpec.scala +++ b/akka-stream-tests/src/test/scala/org/apache/pekko/stream/snapshot/MaterializerStateSpec.scala @@ -52,9 +52,9 @@ class MaterializerStateSpec extends AkkaSpec() { "snapshot a running stream that includes a TLSActor" in { Source.never - 
.via(Tcp(system).outgoingConnectionWithTls(InetSocketAddress.createUnresolved("akka.io", 443), + .via(Tcp(system).outgoingConnectionWithTls(InetSocketAddress.createUnresolved("pekko.io", 443), () => { - val engine = SSLContext.getDefault.createSSLEngine("akka.io", 443) + val engine = SSLContext.getDefault.createSSLEngine("pekko.io", 443) engine.setUseClientMode(true) engine })) diff --git a/akka-stream/src/main/resources/reference.conf b/akka-stream/src/main/resources/reference.conf index 139433fe9f..124f98003d 100644 --- a/akka-stream/src/main/resources/reference.conf +++ b/akka-stream/src/main/resources/reference.conf @@ -1,10 +1,10 @@ ##################################### -# Akka Stream Reference Config File # +# Pekko Stream Reference Config File # ##################################### # eager creation of the system wide materializer -akka.library-extensions += "org.apache.pekko.stream.SystemMaterializer$" -akka { +pekko.library-extensions += "org.apache.pekko.stream.SystemMaterializer$" +pekko { stream { # Default materializer settings @@ -17,12 +17,12 @@ akka { # Fully qualified config path which holds the dispatcher configuration # or full dispatcher configuration to be used by ActorMaterializer when creating Actors. 
- dispatcher = "akka.actor.default-dispatcher" + dispatcher = "pekko.actor.default-dispatcher" # Fully qualified config path which holds the dispatcher configuration # or full dispatcher configuration to be used by stream operators that # perform blocking operations - blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher" + blocking-io-dispatcher = "pekko.actor.default-blocking-io-dispatcher" # Cleanup leaked publishers and subscribers when they are not used within a given # deadline @@ -37,7 +37,7 @@ akka { mode = cancel # time after which a subscriber / publisher is considered stale and eligible - # for cancelation (see `akka.stream.subscription-timeout.mode`) + # for cancellation (see `pekko.stream.materializer.subscription-timeout.mode`) timeout = 5s } @@ -50,7 +50,7 @@ akka { # Enable automatic fusing of all graphs that are run. For short-lived streams # this may cause an initial runtime overhead, but most of the time fusing is # desirable since it reduces the number of Actors that are created. - # Deprecated, since Akka 2.5.0, setting does not have any effect. + # Deprecated since Akka 2.5.0 (pre-fork), setting does not have any effect.
auto-fusing = on # Those stream elements which have explicit buffers (like mapAsync, mapAsyncUnordered, @@ -162,15 +162,15 @@ akka { //#stream-ref } - # Deprecated, left here to not break Akka HTTP which refers to it - blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher" + # Deprecated, left here to not break Pekko HTTP which refers to it + blocking-io-dispatcher = "pekko.actor.default-blocking-io-dispatcher" - # Deprecated, will not be used unless user code refer to it, use 'akka.stream.materializer.blocking-io-dispatcher' + # Deprecated, will not be used unless user code refer to it, use 'pekko.stream.materializer.blocking-io-dispatcher' # instead, or if from code, prefer the 'ActorAttributes.IODispatcher' attribute - default-blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher" + default-blocking-io-dispatcher = "pekko.actor.default-blocking-io-dispatcher" } - # configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e. when serving https connections) + # configure overrides to ssl-configuration here (to be used by pekko-streams, and pekko-http – i.e. 
when serving https connections) ssl-config { protocol = "TLSv1.2" } @@ -178,13 +178,13 @@ akka { actor { serializers { - akka-stream-ref = "org.apache.pekko.stream.serialization.StreamRefSerializer" + pekko-stream-ref = "org.apache.pekko.stream.serialization.StreamRefSerializer" } serialization-bindings { - "org.apache.pekko.stream.SinkRef" = akka-stream-ref - "org.apache.pekko.stream.SourceRef" = akka-stream-ref - "org.apache.pekko.stream.impl.streamref.StreamRefsProtocol" = akka-stream-ref + "org.apache.pekko.stream.SinkRef" = pekko-stream-ref + "org.apache.pekko.stream.SourceRef" = pekko-stream-ref + "org.apache.pekko.stream.impl.streamref.StreamRefsProtocol" = pekko-stream-ref } serialization-identifiers { @@ -194,7 +194,7 @@ akka { } # ssl configuration -# folded in from former ssl-config-akka module +# folded in from the former ssl-config-akka module ssl-config { - logger = "com.typesafe.sslconfig.akka.util.AkkaLoggerBridge" + logger = "com.typesafe.sslconfig.pekko.util.AkkaLoggerBridge" } diff --git a/akka-stream/src/main/scala/com/typesafe/sslconfig/pekko/AkkaSSLConfig.scala b/akka-stream/src/main/scala/com/typesafe/sslconfig/pekko/AkkaSSLConfig.scala index 4b658d8e29..fc06bedd3c 100644 --- a/akka-stream/src/main/scala/com/typesafe/sslconfig/pekko/AkkaSSLConfig.scala +++ b/akka-stream/src/main/scala/com/typesafe/sslconfig/pekko/AkkaSSLConfig.scala @@ -30,7 +30,7 @@ object AkkaSSLConfig extends ExtensionId[AkkaSSLConfig] with ExtensionIdProvider new AkkaSSLConfig(system, defaultSSLConfigSettings(system)) def defaultSSLConfigSettings(system: ActorSystem): SSLConfigSettings = { - val akkaOverrides = system.settings.config.getConfig("akka.ssl-config") + val akkaOverrides = system.settings.config.getConfig("pekko.ssl-config") val defaults = system.settings.config.getConfig("ssl-config") SSLConfigFactory.parse(akkaOverrides.withFallback(defaults)) } diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/ActorMaterializer.scala
b/akka-stream/src/main/scala/org/apache/pekko/stream/ActorMaterializer.scala index f74ee756a2..3e4f60037d 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/ActorMaterializer.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/ActorMaterializer.scala @@ -314,7 +314,7 @@ object ActorMaterializerSettings { maxFixedBufferSize, 1000, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")), + StreamRefSettings(config.getConfig("pekko.stream.materializer.stream-ref")), config.getString(ActorAttributes.IODispatcher.dispatcher)) } @@ -328,7 +328,7 @@ object ActorMaterializerSettings { "Use config or attributes to configure the materializer. See migration guide for details https://doc.akka.io/docs/akka/2.6/project/migration-guide-2.5.x-2.6.x.html", "2.6.0") def apply(system: ActorSystem): ActorMaterializerSettings = - apply(system.settings.config.getConfig("akka.stream.materializer")) + apply(system.settings.config.getConfig("pekko.stream.materializer")) /** * Create [[ActorMaterializerSettings]] from a Config subsection (Scala). 
@@ -391,7 +391,7 @@ object ActorMaterializerSettings { maxFixedBufferSize, 1000, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")), + StreamRefSettings(config.getConfig("pekko.stream.materializer.stream-ref")), config.getString(ActorAttributes.IODispatcher.dispatcher)) } @@ -495,7 +495,7 @@ final class ActorMaterializerSettings @InternalApi private ( maxFixedBufferSize, syncProcessingLimit, ioSettings, - StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), + StreamRefSettings(ConfigFactory.defaultReference().getConfig("pekko.stream.materializer.stream-ref")), ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher)) // backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima @@ -526,7 +526,7 @@ final class ActorMaterializerSettings @InternalApi private ( maxFixedBufferSize, syncProcessingLimit, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), + StreamRefSettings(ConfigFactory.defaultReference().getConfig("pekko.stream.materializer.stream-ref")), ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher)) // backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima @@ -556,7 +556,7 @@ final class ActorMaterializerSettings @InternalApi private ( maxFixedBufferSize, 1000, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), + StreamRefSettings(ConfigFactory.defaultReference().getConfig("pekko.stream.materializer.stream-ref")), ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher)) private def copy( @@ -770,13 +770,13 @@ final class ActorMaterializerSettings @InternalApi private ( 
object IOSettings { @deprecated( - "Use setting 'akka.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", + "Use setting 'pekko.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", "2.6.0") def apply(system: ActorSystem): IOSettings = - apply(system.settings.config.getConfig("akka.stream.materializer.io")) + apply(system.settings.config.getConfig("pekko.stream.materializer.io")) @deprecated( - "Use setting 'akka.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", + "Use setting 'pekko.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", "2.6.0") def apply(config: Config): IOSettings = new IOSettings( @@ -784,26 +784,26 @@ object IOSettings { coalesceWrites = config.getInt("tcp.coalesce-writes")) @deprecated( - "Use setting 'akka.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", + "Use setting 'pekko.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", "2.6.0") def apply(tcpWriteBufferSize: Int): IOSettings = new IOSettings(tcpWriteBufferSize) /** Java API */ @deprecated( - "Use setting 'akka.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", + "Use setting 'pekko.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", "2.6.0") def create(config: Config) = apply(config) /** Java API */ @deprecated( - "Use setting 'akka.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", + "Use setting 'pekko.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", "2.6.0") def create(system: ActorSystem) = apply(system) /** Java API */ @deprecated( - "Use setting 'akka.stream.materializer.io.tcp.write-buffer-size' or attribute 
TcpAttributes.writeBufferSize instead", + "Use setting 'pekko.stream.materializer.io.tcp.write-buffer-size' or attribute TcpAttributes.writeBufferSize instead", "2.6.0") def create(tcpWriteBufferSize: Int): IOSettings = apply(tcpWriteBufferSize) diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/Attributes.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/Attributes.scala index 508329bb57..96f70c8223 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/Attributes.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/Attributes.scala @@ -732,7 +732,7 @@ object ActorAttributes { final case class SupervisionStrategy(decider: Supervision.Decider) extends MandatoryAttribute - val IODispatcher: Dispatcher = ActorAttributes.Dispatcher("akka.stream.materializer.blocking-io-dispatcher") + val IODispatcher: Dispatcher = ActorAttributes.Dispatcher("pekko.stream.materializer.blocking-io-dispatcher") /** * Specifies the name of the dispatcher. This also adds an async boundary. 
diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/StreamRefSettings.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/StreamRefSettings.scala index e716bca8bb..d6fb3bac19 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/StreamRefSettings.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/StreamRefSettings.scala @@ -30,7 +30,7 @@ object StreamRefSettings { "Use attributes on the Runnable graph or change the defaults in configuration, see migration guide for details https://doc.akka.io/docs/akka/2.6/project/migration-guide-2.5.x-2.6.x.html", since = "2.6.0") def apply(system: ActorSystem): StreamRefSettings = { - apply(system.settings.config.getConfig("akka.stream.materializer.stream-ref")) + apply(system.settings.config.getConfig("pekko.stream.materializer.stream-ref")) } /** Java API */ diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/SystemMaterializer.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/SystemMaterializer.scala index d681e4d995..0d4f52721f 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/SystemMaterializer.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/SystemMaterializer.scala @@ -51,7 +51,7 @@ final class SystemMaterializer(system: ExtendedActorSystem) extends Extension { private[pekko] val materializerSettings = ActorMaterializerSettings(system) private implicit val materializerTimeout: Timeout = - system.settings.config.getDuration("akka.stream.materializer.creation-timeout").asScala + system.settings.config.getDuration("pekko.stream.materializer.creation-timeout").asScala @InternalApi @nowarn("msg=deprecated") private val materializerGuardian = system.systemActorOf( diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/impl/PhasedFusingActorMaterializer.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/impl/PhasedFusingActorMaterializer.scala index e2c25cdcd4..6f3209c9ec 100644 --- 
a/akka-stream/src/main/scala/org/apache/pekko/stream/impl/PhasedFusingActorMaterializer.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/impl/PhasedFusingActorMaterializer.scala @@ -401,7 +401,7 @@ private final case class SavedIslandData( private val _logger = Logging.getLogger(system, this) override def logger: LoggingAdapter = _logger private val fuzzingWarningDisabled = - system.settings.config.hasPath("akka.stream.secret-test-fuzzing-warning-disable") + system.settings.config.hasPath("pekko.stream.secret-test-fuzzing-warning-disable") override def shutdown(): Unit = if (haveShutDown.compareAndSet(false, true)) supervisor ! PoisonPill @@ -461,7 +461,7 @@ private final case class SavedIslandData( if (defaultAndGraphAttributes.mandatoryAttribute[ActorAttributes.FuzzingMode].enabled && !fuzzingWarningDisabled) { _logger.warning( "Fuzzing mode is enabled on this system. If you see this warning on your production system then " + - "set 'akka.stream.materializer.debug.fuzzing-mode' to off.") + "set 'pekko.stream.materializer.debug.fuzzing-mode' to off.") } val islandTracking = new IslandTracking( diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/impl/StreamSubscriptionTimeout.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/impl/StreamSubscriptionTimeout.scala index 7f9966fba6..a9cae1c2bf 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/impl/StreamSubscriptionTimeout.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/impl/StreamSubscriptionTimeout.scala @@ -57,7 +57,7 @@ import pekko.stream.StreamSubscriptionTimeoutTerminationMode.{ CancelTermination * Provides support methods to create Publishers and Subscribers which time-out gracefully, * and are canceled subscribing an `CancellingSubscriber` to the publisher, or by calling `onError` on the timed-out subscriber. * - * See `akka.stream.materializer.subscription-timeout` for configuration options. 
+ * See `pekko.stream.materializer.subscription-timeout` for configuration options. */ @nowarn("msg=deprecated") @InternalApi private[pekko] trait StreamSubscriptionTimeoutSupport { diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala index 402259d3b8..fbdf69bbdf 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala @@ -29,7 +29,7 @@ object FileIO { * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * @param f The file to write to @@ -45,7 +45,7 @@ object FileIO { * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. 
* * Accepts as arguments a set of [[java.nio.file.StandardOpenOption]], which will determine @@ -65,7 +65,7 @@ object FileIO { * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * @param f The file to write to @@ -81,7 +81,7 @@ object FileIO { * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * Accepts as arguments a set of [[java.nio.file.StandardOpenOption]], which will determine @@ -102,7 +102,7 @@ object FileIO { * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. 
* - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * Accepts as arguments a set of [[java.nio.file.StandardOpenOption]], which will determine @@ -126,7 +126,7 @@ object FileIO { * Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes, * except the last element, which will be up to 8192 in size. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -143,7 +143,7 @@ object FileIO { * Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes, * except the last element, which will be up to 8192 in size. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -159,7 +159,7 @@ object FileIO { * Emitted elements are `chunkSize` sized [[ByteString]] elements, * except the last element, which will be up to `chunkSize` in size. 
* - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -178,7 +178,7 @@ object FileIO { * Emitted elements are `chunkSize` sized [[ByteString]] elements, * except the last element, which will be up to `chunkSize` in size. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -196,7 +196,7 @@ object FileIO { * Emitted elements are `chunkSize` sized [[ByteString]] elements, * except the last element, which will be up to `chunkSize` in size. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. 
* * It materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala index 77f72f6093..928e5126e8 100755 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala @@ -817,7 +817,7 @@ object Source { * `Restart` supervision strategy will close and create blocking IO again. Default strategy is `Stop` which means * that stream will be terminated on error in `read` function by default. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. @@ -843,7 +843,7 @@ object Source { * `Restart` supervision strategy will close and create resource. Default strategy is `Stop` which means * that stream will be terminated on error in `read` function (or future) by default. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala index 6e74f8b7ff..a3fe739fe4 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala @@ -32,7 +32,7 @@ object StreamConverters { * Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * This method uses no auto flush for the [[java.io.OutputStream]] @see [[#fromOutputStream(function.Creator, Boolean)]] if you want to override it. @@ -51,7 +51,7 @@ object StreamConverters { * Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * The [[OutputStream]] will be closed when the stream flowing into this [[Sink]] is completed. The [[Sink]] @@ -125,7 +125,7 @@ object StreamConverters { * [[java.io.InputStream]] returns on each read invocation. Such chunks will * never be larger than chunkSize though. 
* - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -146,7 +146,7 @@ object StreamConverters { * [[java.io.InputStream]] returns on each read invocation. Such chunks will * never be larger than chunkSize though. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[CompletionStage]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -181,7 +181,7 @@ object StreamConverters { * * This Source is intended for inter-operation with legacy APIs since it is inherently blocking. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * The created [[OutputStream]] will be closed when the [[Source]] is cancelled, and closing the [[OutputStream]] @@ -221,7 +221,7 @@ object StreamConverters { * * Be aware that Java ``Stream`` blocks current thread while waiting on next element from downstream. * As it is interacting wit blocking API the implementation runs on a separate dispatcher - * configured through the ``akka.stream.blocking-io-dispatcher``. 
+ * configured through the ``pekko.stream.blocking-io-dispatcher``. */ def asJavaStream[T](): Sink[T, java.util.stream.Stream[T]] = new Sink(scaladsl.StreamConverters.asJavaStream()) diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala index a85aff26d2..fc2ef438cc 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala @@ -26,7 +26,7 @@ object FileIO { * Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements, * except the final element, which will be up to `chunkSize` in size. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[Future]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -45,7 +45,7 @@ object FileIO { * Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements, * except the final element, which will be up to `chunkSize` in size. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[Future]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -63,7 +63,7 @@ object FileIO { * Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements, * except the final element, which will be up to `chunkSize` in size. 
* - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[Future]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -84,7 +84,7 @@ object FileIO { * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * This source is backed by an Actor which will use the dedicated `akka.stream.blocking-io-dispatcher`, + * This source is backed by an Actor which will use the dedicated `pekko.stream.blocking-io-dispatcher`, * unless configured otherwise by using [[pekko.stream.ActorAttributes]]. * * @param f the file to write to @@ -103,7 +103,7 @@ object FileIO { * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * This source is backed by an Actor which will use the dedicated `akka.stream.blocking-io-dispatcher`, + * This source is backed by an Actor which will use the dedicated `pekko.stream.blocking-io-dispatcher`, * unless configured otherwise by using [[pekko.stream.ActorAttributes]]. * * Accepts as arguments a set of [[java.nio.file.StandardOpenOption]], which will determine @@ -127,7 +127,7 @@ object FileIO { * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. 
* - * This source is backed by an Actor which will use the dedicated `akka.stream.blocking-io-dispatcher`, + * This source is backed by an Actor which will use the dedicated `pekko.stream.blocking-io-dispatcher`, * unless configured otherwise by using [[pekko.stream.ActorAttributes]]. * * Accepts as arguments a set of [[java.nio.file.StandardOpenOption]], which will determine diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala index 4240962a99..85ab5c7ea6 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala @@ -925,7 +925,7 @@ object Source { * `Restart` supervision strategy will close and create blocking IO again. Default strategy is `Stop` which means * that stream will be terminated on error in `read` function by default. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. @@ -948,7 +948,7 @@ object Source { * `Restart` supervision strategy will close and create resource. Default strategy is `Stop` which means * that stream will be terminated on error in `read` function (or future) by default. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[ActorAttributes]]. * * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute. 
diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala index 7c263e419a..ac6a63c0da 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala @@ -32,7 +32,7 @@ object StreamConverters { * [[java.io.InputStream]] returns on each read invocation. Such chunks will * never be larger than chunkSize though. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * * It materializes a [[Future]] of [[IOResult]] containing the number of bytes read from the source file upon completion, @@ -70,7 +70,7 @@ object StreamConverters { * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. * - * You can configure the default dispatcher for this Source by changing the `akka.stream.materializer.blocking-io-dispatcher` or + * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or * set it for a given Source by using [[pekko.stream.ActorAttributes]]. * If `autoFlush` is true the OutputStream will be flushed whenever a byte array is written, defaults to false. * @@ -173,7 +173,7 @@ object StreamConverters { * * Be aware that Java ``Stream`` blocks current thread while waiting on next element from downstream. 
* As it is interacting wit blocking API the implementation runs on a separate dispatcher - * configured through the ``akka.stream.blocking-io-dispatcher``. + * configured through the ``pekko.stream.blocking-io-dispatcher``. */ def asJavaStream[T](): Sink[T, java.util.stream.Stream[T]] = { // TODO removing the QueueSink name, see issue #22523 diff --git a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Tcp.scala index 10b67c8f28..da68c640b3 100644 --- a/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Tcp.scala +++ b/akka-stream/src/main/scala/org/apache/pekko/stream/scaladsl/Tcp.scala @@ -103,9 +103,9 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { final class Tcp(system: ExtendedActorSystem) extends pekko.actor.Extension { import Tcp._ - // TODO maybe this should be a new setting, like `akka.stream.tcp.bind.timeout` / `shutdown-timeout` instead? + // TODO maybe this should be a new setting, like `pekko.stream.tcp.bind.timeout` / `shutdown-timeout` instead? val bindShutdownTimeout: FiniteDuration = - system.settings.config.getDuration("akka.stream.materializer.subscription-timeout.timeout").asScala + system.settings.config.getDuration("pekko.stream.materializer.subscription-timeout.timeout").asScala /** * Creates a [[Tcp.ServerBinding]] instance which represents a prospective TCP server binding on the given `endpoint`. diff --git a/akka-testkit/src/main/resources/reference.conf b/akka-testkit/src/main/resources/reference.conf index b86be22331..3d52d83590 100644 --- a/akka-testkit/src/main/resources/reference.conf +++ b/akka-testkit/src/main/resources/reference.conf @@ -1,11 +1,11 @@ ###################################### -# Akka Testkit Reference Config File # +# Pekko Testkit Reference Config File # ###################################### # This is the reference config file that contains all the default settings. 
# Make your edits/overrides in your application.conf. -akka { +pekko { test { # factor by which to scale timeouts during tests, e.g. to account for shared # build system load diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/CallingThreadDispatcher.scala index 9cecff6996..21f9f27c46 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/CallingThreadDispatcher.scala @@ -126,7 +126,7 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension { } object CallingThreadDispatcher { - val Id = "akka.test.calling-thread-dispatcher" + val Id = "pekko.test.calling-thread-dispatcher" } /** diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestEventListener.scala index 9724953568..729e95eb9d 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestEventListener.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestEventListener.scala @@ -535,11 +535,11 @@ final case class DeadLettersFilter(val messageClass: Class[_])(occurrences: Int) /** * EventListener for running tests, which allows selectively filtering out * expected messages. To use it, include something like this into - * akka.test.conf and run your tests with system property - * "akka.mode" set to "test": + * pekko.test.conf and run your tests with system property + * "pekko.mode" set to "test": * *

- * akka {
+ * pekko {
  *   loggers = ["org.apache.pekko.testkit.TestEventListener"]
  * }
  * 
diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestJavaSerializer.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestJavaSerializer.scala index a7990ecf7b..d36724afdb 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestJavaSerializer.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestJavaSerializer.scala @@ -16,7 +16,7 @@ import pekko.util.ClassLoaderObjectInputStream * between actor systems. It needs to be explicitly enabled in the config (or through `ActorSystemSetup`) like so: * * ``` - * akka.actor.serialization-bindings { + * pekko.actor.serialization-bindings { * "my.test.AdHocMessage" = java-test * } * ``` diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala index 5c22a1fc50..6eea83d001 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala @@ -240,7 +240,7 @@ trait TestKitBase { /** * Obtain time remaining for execution of the innermost enclosing `within` * block or missing that it returns the properly dilated default for this - * case from settings (key "akka.test.single-expect-default"). + * case from settings (key "pekko.test.single-expect-default"). */ def remainingOrDefault = remainingOr(testKitSettings.SingleExpectDefaultTimeout.dilated) @@ -285,7 +285,7 @@ trait TestKitBase { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def awaitCond( p: => Boolean, @@ -317,7 +317,7 @@ trait TestKitBase { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". 
*/ def awaitAssert[A](a: => A, max: Duration = Duration.Undefined, interval: Duration = 100.millis): A = { val _max = remainingOrDilated(max) @@ -359,7 +359,7 @@ trait TestKitBase { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def assertForDuration[A](a: => A, max: FiniteDuration, interval: Duration = 100.millis): A = { val _max = remainingOrDilated(max) @@ -395,7 +395,7 @@ trait TestKitBase { * the remaining time governed by the innermost enclosing `within` block. * * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. * * {{{ * val ret = within(50 millis) { @@ -712,8 +712,8 @@ trait TestKitBase { /** * Assert that no message is received. Waits for the default period configured as - * `akka.test.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.test.timefactor". + * `pekko.test.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.test.timefactor". */ @deprecated(message = "Use expectNoMessage instead", since = "2.5.5") def expectNoMsg(): Unit = expectNoMessage() @@ -737,8 +737,8 @@ trait TestKitBase { /** * Assert that no message is received. Waits for the default period configured as - * `akka.test.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.test.timefactor". + * `pekko.test.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.test.timefactor". 
*/ def expectNoMessage(): Unit = expectNoMsg_internal(testKitSettings.ExpectNoMessageDefaultTimeout.dilated) @@ -967,7 +967,7 @@ trait TestKitBase { * * It should be noted that for CI servers and the like all maximum Durations * are scaled using their Duration.dilated method, which uses the - * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry "akka.test.timefactor". + * TestKitExtension.Settings.TestTimeFactor settable via pekko.conf entry "pekko.test.timefactor". * * @since 1.1 */ diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKitExtension.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKitExtension.scala index f6dcc9db0f..3de112949d 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKitExtension.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/TestKitExtension.scala @@ -24,10 +24,10 @@ class TestKitSettings(val config: Config) extends Extension { import pekko.util.Helpers._ val TestTimeFactor: Double = config - .getDouble("akka.test.timefactor") - .requiring(tf => !tf.isInfinite && tf > 0, "akka.test.timefactor must be positive finite double") - val SingleExpectDefaultTimeout: FiniteDuration = config.getMillisDuration("akka.test.single-expect-default") - val ExpectNoMessageDefaultTimeout: FiniteDuration = config.getMillisDuration("akka.test.expect-no-message-default") - val TestEventFilterLeeway: FiniteDuration = config.getMillisDuration("akka.test.filter-leeway") - val DefaultTimeout: Timeout = Timeout(config.getMillisDuration("akka.test.default-timeout")) + .getDouble("pekko.test.timefactor") + .requiring(tf => !tf.isInfinite && tf > 0, "pekko.test.timefactor must be positive finite double") + val SingleExpectDefaultTimeout: FiniteDuration = config.getMillisDuration("pekko.test.single-expect-default") + val ExpectNoMessageDefaultTimeout: FiniteDuration = config.getMillisDuration("pekko.test.expect-no-message-default") + val TestEventFilterLeeway: FiniteDuration = 
config.getMillisDuration("pekko.test.filter-leeway") + val DefaultTimeout: Timeout = Timeout(config.getMillisDuration("pekko.test.default-timeout")) } diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala index b60b4dd0b1..169941a805 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala @@ -36,8 +36,8 @@ import pekko.util.ccompat.JavaConverters._ * * - It should be noted that for CI servers and the like all maximum Durations * are scaled using the `dilated` method, which uses the - * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry - * "akka.test.timefactor". + * TestKitExtension.Settings.TestTimeFactor settable via pekko.conf entry + * "pekko.test.timefactor". */ class TestKit(system: ActorSystem) { @@ -172,7 +172,7 @@ class TestKit(system: ActorSystem) { /** * Obtain time remaining for execution of the innermost enclosing `within` * block or missing that it returns the properly dilated default for this - * case from settings (key "akka.test.single-expect-default"). + * case from settings (key "pekko.test.single-expect-default"). */ @Deprecated @deprecated("Use getRemainingOrDefault which returns java.time.Duration instead.", since = "2.5.12") @@ -181,7 +181,7 @@ class TestKit(system: ActorSystem) { /** * Obtain time remaining for execution of the innermost enclosing `within` * block or missing that it returns the properly dilated default for this - * case from settings (key "akka.test.single-expect-default"). + * case from settings (key "pekko.test.single-expect-default"). */ def getRemainingOrDefault: java.time.Duration = tp.remainingOrDefault.asJava @@ -192,7 +192,7 @@ class TestKit(system: ActorSystem) { * the remaining time governed by the innermost enclosing `within` block. 
* * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. * * {{{ * @@ -214,7 +214,7 @@ class TestKit(system: ActorSystem) { * the remaining time governed by the innermost enclosing `within` block. * * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. * * {{{ * @@ -235,7 +235,7 @@ class TestKit(system: ActorSystem) { * the remaining time governed by the innermost enclosing `within` block. * * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. * * {{{ * @@ -257,7 +257,7 @@ class TestKit(system: ActorSystem) { * the remaining time governed by the innermost enclosing `within` block. * * Note that the timeout is scaled using Duration.dilated, which uses the - * configuration entry "akka.test.timefactor", while the min Duration is not. + * configuration entry "pekko.test.timefactor", while the min Duration is not. * * {{{ * @@ -278,7 +278,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def awaitCond(p: Supplier[Boolean]): Unit = tp.awaitCond(p.get) @@ -290,7 +290,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". 
*/ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -304,7 +304,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def awaitCond(max: java.time.Duration, p: Supplier[Boolean]): Unit = tp.awaitCond(p.get, max.asScala) @@ -316,7 +316,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -331,7 +331,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def awaitCond(max: java.time.Duration, interval: java.time.Duration, p: Supplier[Boolean]): Unit = tp.awaitCond(p.get, max.asScala, interval.asScala) @@ -344,7 +344,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -359,7 +359,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". 
*/ def awaitCond(max: java.time.Duration, interval: java.time.Duration, message: String, p: Supplier[Boolean]): Unit = tp.awaitCond(p.get, max.asScala, interval.asScala, message) @@ -374,7 +374,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def awaitAssert[A](a: Supplier[A]): A = tp.awaitAssert(a.get) @@ -388,7 +388,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.13") @@ -404,7 +404,7 @@ class TestKit(system: ActorSystem) { * block. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". */ def awaitAssert[A](max: java.time.Duration, a: Supplier[A]): A = tp.awaitAssert(a.get, max.asScala) @@ -413,7 +413,7 @@ class TestKit(system: ActorSystem) { * If the `max` timeout expires the last exception is thrown. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". * * @return an arbitrary value that would be returned from awaitAssert if successful, if not interested in such value you can return null. */ @@ -426,7 +426,7 @@ class TestKit(system: ActorSystem) { * If the `max` timeout expires the last exception is thrown. * * Note that the timeout is scaled using Duration.dilated, - * which uses the configuration entry "akka.test.timefactor". + * which uses the configuration entry "pekko.test.timefactor". 
* * @return an arbitrary value that would be returned from awaitAssert if successful, if not interested in such value you can return null. */ @@ -639,16 +639,16 @@ class TestKit(system: ActorSystem) { /** * Assert that no message is received. Waits for the default period configured as - * `akka.actor.testkit.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * `pekko.actor.testkit.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ @deprecated(message = "Use expectNoMessage instead", since = "2.5.10") def expectNoMsg(): Unit = tp.expectNoMessage() /** * Assert that no message is received. Waits for the default period configured as - * `akka.actor.testkit.expect-no-message-default`. - * That timeout is scaled using the configuration entry "akka.actor.testkit.typed.timefactor". + * `pekko.actor.testkit.expect-no-message-default`. + * That timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ def expectNoMessage(): Unit = tp.expectNoMessage() diff --git a/akka-testkit/src/main/scala/org/apache/pekko/testkit/package.scala b/akka-testkit/src/main/scala/org/apache/pekko/testkit/package.scala index b8b8c3db91..e3606f6cd9 100644 --- a/akka-testkit/src/main/scala/org/apache/pekko/testkit/package.scala +++ b/akka-testkit/src/main/scala/org/apache/pekko/testkit/package.scala @@ -46,7 +46,7 @@ package object testkit { /** * Scala API. Scale timeouts (durations) during tests with the configured - * 'akka.test.timefactor'. + * 'pekko.test.timefactor'. * Implicit class providing `dilated` method. 
* {{{ * import scala.concurrent.duration._ diff --git a/akka-testkit/src/test/resources/reference.conf b/akka-testkit/src/test/resources/reference.conf index deb3710ea1..b85ab5e054 100644 --- a/akka-testkit/src/test/resources/reference.conf +++ b/akka-testkit/src/test/resources/reference.conf @@ -1,10 +1,10 @@ -akka { +pekko { # Configures MetricsKit test.metrics { # Available reporters are: console, graphite # In order to configure from the command line, use the alternative list syntax: - # -Dakka.test.metrics.reporters.0=console -Dakka.test.metrics.reporters.1=graphite + # -Dpekko.test.metrics.reporters.0=console -Dpekko.test.metrics.reporters.1=graphite reporters = [console] reporter { diff --git a/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpec.scala index 35d3fe9b3b..59716344d4 100644 --- a/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpec.scala @@ -28,7 +28,7 @@ import pekko.testkit.TestEvent._ object AkkaSpec { val testConf: Config = ConfigFactory.parseString(""" - akka { + pekko { loggers = ["org.apache.pekko.testkit.TestEventListener"] loglevel = "WARNING" stdout-loglevel = "WARNING" diff --git a/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpecSpec.scala index 1514c90231..03bbd9dc2a 100644 --- a/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpecSpec.scala +++ b/akka-testkit/src/test/scala/org/apache/pekko/testkit/AkkaSpecSpec.scala @@ -41,10 +41,10 @@ class AkkaSpecSpec extends AnyWordSpec with Matchers { // verbose config just for demonstration purposes, please leave in in case of debugging import pekko.util.ccompat.JavaConverters._ val conf = Map( - "akka.actor.debug.lifecycle" -> true, - "akka.actor.debug.event-stream" -> true, - "akka.loglevel" -> "DEBUG", - "akka.stdout-loglevel" -> 
"DEBUG") + "pekko.actor.debug.lifecycle" -> true, + "pekko.actor.debug.event-stream" -> true, + "pekko.loglevel" -> "DEBUG", + "pekko.stdout-loglevel" -> "DEBUG") val localSystem = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf)) var refs = Seq.empty[ActorRef] val spec = new AkkaSpec(localSystem) { refs = Seq(testActor, localSystem.actorOf(Props.empty, "name")) } diff --git a/akka-testkit/src/test/scala/org/apache/pekko/testkit/TestTimeSpec.scala b/akka-testkit/src/test/scala/org/apache/pekko/testkit/TestTimeSpec.scala index 195e19f34d..6a94f1b377 100644 --- a/akka-testkit/src/test/scala/org/apache/pekko/testkit/TestTimeSpec.scala +++ b/akka-testkit/src/test/scala/org/apache/pekko/testkit/TestTimeSpec.scala @@ -8,7 +8,7 @@ import scala.concurrent.duration._ import org.scalatest.exceptions.TestFailedException -class TestTimeSpec extends AkkaSpec(Map("akka.test.timefactor" -> 2.0)) { +class TestTimeSpec extends AkkaSpec(Map("pekko.test.timefactor" -> 2.0)) { "A TestKit" must { diff --git a/akka-testkit/src/test/scala/org/apache/pekko/testkit/metrics/MetricsKit.scala b/akka-testkit/src/test/scala/org/apache/pekko/testkit/metrics/MetricsKit.scala index 7fbbe2b3e3..e1fb7ab88c 100644 --- a/akka-testkit/src/test/scala/org/apache/pekko/testkit/metrics/MetricsKit.scala +++ b/akka-testkit/src/test/scala/org/apache/pekko/testkit/metrics/MetricsKit.scala @@ -38,7 +38,7 @@ private[pekko] trait MetricsKit extends MetricsKitOps { private var reporters: List[ScheduledReporter] = Nil /** - * A configuration containing [[MetricsKitSettings]] under the key `akka.test.registry` must be provided. + * A configuration containing [[MetricsKitSettings]] under the key `pekko.test.registry` must be provided. * This can be the ActorSystems config. 
* * The reason this is not handled by an Extension is thatwe do not want to enforce having to start an ActorSystem, @@ -208,12 +208,12 @@ private[pekko] class MetricsKitSettings(config: Config) { import pekko.util.Helpers._ - val Reporters = config.getStringList("akka.test.metrics.reporters") + val Reporters = config.getStringList("pekko.test.metrics.reporters") object ConsoleReporter { val ScheduledReportInterval = - config.getMillisDuration("akka.test.metrics.reporter.console.scheduled-report-interval") - val Verbose = config.getBoolean("akka.test.metrics.reporter.console.verbose") + config.getMillisDuration("pekko.test.metrics.reporter.console.scheduled-report-interval") + val Verbose = config.getBoolean("pekko.test.metrics.reporter.console.verbose") } } diff --git a/docs/src/main/categories/additional-sink-and-source-converters.md b/docs/src/main/categories/additional-sink-and-source-converters.md index 9da6bdcc15..e411c7b590 100644 --- a/docs/src/main/categories/additional-sink-and-source-converters.md +++ b/docs/src/main/categories/additional-sink-and-source-converters.md @@ -1,6 +1,6 @@ Sources and sinks for integrating with `java.io.InputStream` and `java.io.OutputStream` can be found on `StreamConverters`. As they are blocking APIs the implementations of these operators are run on a separate -dispatcher configured through the `akka.stream.blocking-io-dispatcher`. +dispatcher configured through the `pekko.stream.blocking-io-dispatcher`. 
@@@ warning diff --git a/docs/src/main/paradox/actors.md b/docs/src/main/paradox/actors.md index 038e8c24ea..04cbbe2262 100644 --- a/docs/src/main/paradox/actors.md +++ b/docs/src/main/paradox/actors.md @@ -9,7 +9,7 @@ To use Classic Actors, add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -311,7 +311,7 @@ If the current actor behavior does not match a received message, `unhandled` is called, which by default publishes an @apidoc[actor.UnhandledMessage(message, sender, recipient)](actor.UnhandledMessage) on the actor system’s event stream (set configuration item -`akka.actor.debug.unhandled` to `on` to have them converted into +`pekko.actor.debug.unhandled` to `on` to have them converted into actual Debug messages). In addition, it offers: diff --git a/docs/src/main/paradox/additional/faq.md b/docs/src/main/paradox/additional/faq.md index b1fa84071c..a67dadf76c 100644 --- a/docs/src/main/paradox/additional/faq.md +++ b/docs/src/main/paradox/additional/faq.md @@ -18,7 +18,7 @@ Akka is also: * the name of the goose that Nils traveled across Sweden on in [The Wonderful Adventures of Nils](https://en.wikipedia.org/wiki/The_Wonderful_Adventures_of_Nils) by the Swedish writer Selma Lagerlöf. * the Finnish word for 'nasty elderly woman' and the word for 'elder sister' in the Indian languages Tamil, Telugu, Kannada and Marathi. 
- * a [font](https://www.dafont.com/akka.font) + * a [font](https://www.dafont.com/akka.font) * a town in Morocco * a near-earth asteroid @@ -67,7 +67,7 @@ Read more in @ref:[Message Delivery Reliability](../general/message-delivery-rel To turn on debug logging in your actor system add the following to your configuration: ``` -akka.loglevel = DEBUG +pekko.loglevel = DEBUG ``` Read more about it in the docs for @ref:[Logging](../typed/logging.md). diff --git a/docs/src/main/paradox/additional/operations.md b/docs/src/main/paradox/additional/operations.md index 840e146e18..8ddfb9b7af 100644 --- a/docs/src/main/paradox/additional/operations.md +++ b/docs/src/main/paradox/additional/operations.md @@ -59,7 +59,7 @@ Member nodes are identified by their address, in format *`akka://actor-system-na ## Monitoring and Observability -Aside from log monitoring and the monitoring provided by your APM or platform provider, [Lightbend Telemetry](https://developer.lightbend.com/docs/telemetry/current/instrumentations/akka/akka.html), +Aside from log monitoring and the monitoring provided by your APM or platform provider, [Lightbend Telemetry](https://developer.lightbend.com/docs/telemetry/current/instrumentations/akka/akka.html), available through a [Lightbend Subscription](https://www.lightbend.com/lightbend-subscription), can provide additional insights in the run-time characteristics of your application, including metrics, events, and distributed tracing for Akka Actors, Cluster, HTTP, and more. 
diff --git a/docs/src/main/paradox/additional/osgi.md b/docs/src/main/paradox/additional/osgi.md index 08ed384d84..cdb587e290 100644 --- a/docs/src/main/paradox/additional/osgi.md +++ b/docs/src/main/paradox/additional/osgi.md @@ -7,7 +7,7 @@ To use Akka in OSGi, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-osgi_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/additional/rolling-updates.md b/docs/src/main/paradox/additional/rolling-updates.md index cea606aff9..1ec7dece3c 100644 --- a/docs/src/main/paradox/additional/rolling-updates.md +++ b/docs/src/main/paradox/additional/rolling-updates.md @@ -52,7 +52,7 @@ different configured `app-version`. To make use of this feature you need to define the `app-version` and increase it for each rolling update. ``` -akka.cluster.app-version = 1.2.3 +pekko.cluster.app-version = 1.2.3 ``` To understand which is old and new it compares the version numbers using normal conventions, @@ -104,9 +104,9 @@ During rolling updates the configuration from existing nodes should pass the Clu For example, it is possible to migrate Cluster Sharding from Classic to Typed Actors in a rolling update using a two step approach as of Akka version `2.5.23`: -* Deploy with the new nodes set to `akka.cluster.configuration-compatibility-check.enforce-on-join = off` +* Deploy with the new nodes set to `pekko.cluster.configuration-compatibility-check.enforce-on-join = off` and ensure all nodes are in this state -* Deploy again and with the new nodes set to `akka.cluster.configuration-compatibility-check.enforce-on-join = on`. +* Deploy again and with the new nodes set to `pekko.cluster.configuration-compatibility-check.enforce-on-join = on`. 
Full documentation about enforcing these checks on joining nodes and optionally adding custom checks can be found in @ref:[Akka Cluster configuration compatibility checks](../typed/cluster.md#configuration-compatibility-check). @@ -122,7 +122,7 @@ without bringing down the entire cluster. The procedure for changing from Java serialization to Jackson would look like: 1. Rolling update from 2.5.24 (or later) to 2.6.0 - * Use config `akka.actor.allow-java-serialization=on`. + * Use config `pekko.actor.allow-java-serialization=on`. * Roll out the change. * Java serialization will be used as before. * This step is optional and you could combine it with next step if you like, but could be good to @@ -131,15 +131,15 @@ The procedure for changing from Java serialization to Jackson would look like: * Change message classes by adding the marker interface and possibly needed annotations as described in @ref:[Serialization with Jackson](../serialization-jackson.md). * Test the system with the new serialization in a new test cluster (no rolling update). - * Remove the binding for the marker interface in `akka.actor.serialization-bindings`, so that Jackson is not used for serialization (toBinary) yet. - * Configure `akka.serialization.jackson.allowed-class-prefix=["com.myapp"]` + * Remove the binding for the marker interface in `pekko.actor.serialization-bindings`, so that Jackson is not used for serialization (toBinary) yet. + * Configure `pekko.serialization.jackson.allowed-class-prefix=["com.myapp"]` * This is needed for Jackson deserialization when the `serialization-bindings` isn't defined. * Replace `com.myapp` with the name of the root package of your application to trust all classes. * Roll out the change. * Java serialization is still used, but this version is prepared for next roll out. 1. Rolling update to enable serialization with Jackson. - * Add the binding to the marker interface in `akka.actor.serialization-bindings` to the Jackson serializer. 
- * Remove `akka.serialization.jackson.allowed-class-prefix`. + * Add the binding to the marker interface in `pekko.actor.serialization-bindings` to the Jackson serializer. + * Remove `pekko.serialization.jackson.allowed-class-prefix`. * Roll out the change. * Old nodes will still send messages with Java serialization, and that can still be deserialized by new nodes. * New nodes will send messages with Jackson serialization, and old node can deserialize those because they were diff --git a/docs/src/main/paradox/camel.md b/docs/src/main/paradox/camel.md index f37aafde3e..978cc34d17 100644 --- a/docs/src/main/paradox/camel.md +++ b/docs/src/main/paradox/camel.md @@ -4,4 +4,4 @@ The akka-camel module was deprecated in 2.5 and has been removed in 2.6. As an alternative we recommend [Alpakka](https://doc.akka.io/docs/alpakka/current/). This is of course not a drop-in replacement. -If anyone is interested in setting up akka-camel as a separate community-maintained repository then please get in touch. \ No newline at end of file +If anyone is interested in setting up akka-camel as a separate community-maintained repository then please get in touch. diff --git a/docs/src/main/paradox/cluster-client.md b/docs/src/main/paradox/cluster-client.md index 2c37e57318..f4a0e3a376 100644 --- a/docs/src/main/paradox/cluster-client.md +++ b/docs/src/main/paradox/cluster-client.md @@ -16,7 +16,7 @@ To use Cluster Client, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion -value1="$akka.version$" +value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-tools_$scala.binary.version$ version=AkkaVersion @@ -114,10 +114,10 @@ of these actors. As always, additional logic should be implemented in the destin ## An Example On the cluster nodes, first start the receptionist. 
Note, it is recommended to load the extension -when the actor system is started by defining it in the `akka.extensions` configuration property: +when the actor system is started by defining it in the `pekko.extensions` configuration property: ``` -akka.extensions = ["org.apache.pekko.cluster.client.ClusterClientReceptionist"] +pekko.extensions = ["org.apache.pekko.cluster.client.ClusterClientReceptionist"] ``` Next, register the actors that should be available for the client. @@ -164,10 +164,10 @@ Note that the @apidoc[ClusterClientReceptionist] uses the @apidoc[DistributedPub in @ref:[Distributed Publish Subscribe in Cluster](distributed-pub-sub.md). It is recommended to load the extension when the actor system is started by defining it in the -`akka.extensions` configuration property: +`pekko.extensions` configuration property: ``` -akka.extensions = ["akka.cluster.client.ClusterClientReceptionist"] +pekko.extensions = ["org.apache.pekko.cluster.client.ClusterClientReceptionist"] ``` ## Events diff --git a/docs/src/main/paradox/cluster-metrics.md b/docs/src/main/paradox/cluster-metrics.md index 0a23e90f9b..4e97738285 100644 --- a/docs/src/main/paradox/cluster-metrics.md +++ b/docs/src/main/paradox/cluster-metrics.md @@ -7,7 +7,7 @@ To use Cluster Metrics Extension, you must add the following dependency in your @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-metrics_$scala.binary.version$ version=AkkaVersion @@ -17,7 +17,7 @@ and add the following configuration stanza to your `application.conf` : ``` -akka.extensions = [ "akka.cluster.metrics.ClusterMetricsExtension" ] +pekko.extensions = [ "org.apache.pekko.cluster.metrics.ClusterMetricsExtension" ] ``` @@project-info{ projectId="akka-cluster-metrics" } @@ -58,7 +58,7 @@ By default, metrics extension will use collector provider fall 
back and will try Metrics extension periodically publishes current snapshot of the cluster metrics to the node system event bus. -The publication interval is controlled by the `akka.cluster.metrics.collector.sample-interval` setting. +The publication interval is controlled by the `pekko.cluster.metrics.collector.sample-interval` setting. The payload of the `org.apache.pekko.cluster.metrics.ClusterMetricsChanged` event will contain latest metrics of the node as well as other cluster member nodes metrics gossip @@ -102,7 +102,7 @@ User is required to manage both project dependency and library deployment manual When using [Kamon sigar-loader](https://github.com/kamon-io/sigar-loader) and running multiple instances of the same application on the same host, you have to make sure that sigar library is extracted to a unique per instance directory. You can control the extract directory with the -`akka.cluster.metrics.native-library-extract-folder` configuration setting. +`pekko.cluster.metrics.native-library-extract-folder` configuration setting. @@@ @@ -151,7 +151,7 @@ Java As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: ``` -akka.actor.deployment { +pekko.actor.deployment { /factorialFrontend/factorialBackendRouter = { # Router type provided by metrics extension. router = cluster-metrics-adaptive-group @@ -202,7 +202,7 @@ You can plug-in your own metrics collector instead of built-in Look at those two implementations for inspiration. Custom metrics collector implementation class must be specified in the -`akka.cluster.metrics.collector.provider` configuration property. +`pekko.cluster.metrics.collector.provider` configuration property. 
## Configuration diff --git a/docs/src/main/paradox/cluster-routing.md b/docs/src/main/paradox/cluster-routing.md index 381ddc7ab7..fd0cf96f9d 100644 --- a/docs/src/main/paradox/cluster-routing.md +++ b/docs/src/main/paradox/cluster-routing.md @@ -34,7 +34,7 @@ To use Cluster aware routers, you must add the following dependency in your proj @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-cluster_$scala.binary.version$" version=AkkaVersion @@ -46,7 +46,7 @@ When using a `Group` you must start the routee actors on the cluster member node That is not done by the router. The configuration for a group looks like this:: ``` -akka.actor.deployment { +pekko.actor.deployment { /statsService/workerRouter { router = consistent-hashing-group routees.paths = ["/user/statsWorker"] @@ -133,7 +133,7 @@ All nodes start `StatsService` and `StatsWorker` actors. Remember, routees are t The router is configured with `routees.paths`:: ``` -akka.actor.deployment { +pekko.actor.deployment { /statsService/workerRouter { router = consistent-hashing-group routees.paths = ["/user/statsWorker"] @@ -155,7 +155,7 @@ When using a `Pool` with routees created and deployed on the cluster member node the configuration for a router looks like this:: ``` -akka.actor.deployment { +pekko.actor.deployment { /statsService/singleton/workerRouter { router = consistent-hashing-pool cluster { @@ -233,7 +233,7 @@ master. It listens to cluster events to lookup the `StatsService` on the oldest All nodes start `ClusterSingletonProxy` and the `ClusterSingletonManager`. 
The router is now configured like this:: ``` -akka.actor.deployment { +pekko.actor.deployment { /statsService/singleton/workerRouter { router = consistent-hashing-pool cluster { diff --git a/docs/src/main/paradox/cluster-sharding.md b/docs/src/main/paradox/cluster-sharding.md index 7a7e51e2af..96894cf63c 100644 --- a/docs/src/main/paradox/cluster-sharding.md +++ b/docs/src/main/paradox/cluster-sharding.md @@ -10,7 +10,7 @@ To use Cluster Sharding, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-sharding_$scala.binary.version$ version=AkkaVersion @@ -229,7 +229,7 @@ the identifiers of the shards running in a Region and what entities are alive fo a `ShardRegion.ClusterShardingStats` containing the identifiers of the shards running in each region and a count of entities that are alive in each shard. -If any shard queries failed, for example due to timeout if a shard was too busy to reply within the configured `akka.cluster.sharding.shard-region-query-timeout`, +If any shard queries failed, for example due to timeout if a shard was too busy to reply within the configured `pekko.cluster.sharding.shard-region-query-timeout`, `ShardRegion.CurrentShardRegionState` and `ShardRegion.ClusterShardingStats` will also include the set of shard identifiers by region that failed. The type names of all started shards can be acquired via @scala[`ClusterSharding.shardTypeNames`] @java[`ClusterSharding.getShardTypeNames`]. 
diff --git a/docs/src/main/paradox/cluster-singleton.md b/docs/src/main/paradox/cluster-singleton.md index edfa068b77..f024825ee8 100644 --- a/docs/src/main/paradox/cluster-singleton.md +++ b/docs/src/main/paradox/cluster-singleton.md @@ -10,7 +10,7 @@ To use Cluster Singleton, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-tools_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/cluster-usage.md b/docs/src/main/paradox/cluster-usage.md index e534f7cefc..d854561601 100644 --- a/docs/src/main/paradox/cluster-usage.md +++ b/docs/src/main/paradox/cluster-usage.md @@ -26,7 +26,7 @@ To use Akka Cluster add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-cluster_$scala.binary.version$" version=AkkaVersion @@ -51,7 +51,7 @@ Scala Java : @@snip [SimpleClusterListener.java](/docs/src/test/java/jdocs/cluster/SimpleClusterListener.java) { type=java } -And the minimum configuration required is to set a host/port for remoting and the `akka.actor.provider = "cluster"`. +And the minimum configuration required is to set a host/port for remoting and the `pekko.actor.provider = "cluster"`. 
@@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/BasicClusterExampleSpec.scala) { #config-seeds } @@ -219,14 +219,14 @@ With a configuration option you can define required number of members before the leader changes member status of 'Joining' members to 'Up'.: ``` -akka.cluster.min-nr-of-members = 3 +pekko.cluster.min-nr-of-members = 3 ``` In a similar way you can define required number of members of a certain role before the leader changes member status of 'Joining' members to 'Up'.: ``` -akka.cluster.role { +pekko.cluster.role { frontend.min-nr-of-members = 1 backend.min-nr-of-members = 2 } @@ -234,7 +234,7 @@ akka.cluster.role { You can start actors or trigger any functions using the @apidoc[registerOnMemberUp](cluster.Cluster) {scala="#registerOnMemberUp[T](code:=%3ET):Unit" java="#registerOnMemberUp(java.lang.Runnable)"} callback, which will be invoked when the current member status is changed to 'Up'. This can additionally be used with -`akka.cluster.min-nr-of-members` optional configuration to defer an action until the cluster has reached a certain size. +`pekko.cluster.min-nr-of-members` optional configuration to defer an action until the cluster has reached a certain size. Scala : @@snip [FactorialFrontend.scala](/docs/src/test/scala/docs/cluster/FactorialFrontend.scala) { #registerOnUp } diff --git a/docs/src/main/paradox/coordinated-shutdown.md b/docs/src/main/paradox/coordinated-shutdown.md index d16a76d7f5..bbd876e1a3 100644 --- a/docs/src/main/paradox/coordinated-shutdown.md +++ b/docs/src/main/paradox/coordinated-shutdown.md @@ -8,7 +8,7 @@ The @apidoc[CoordinatedShutdown$] extension registers internal and user-defined Especially the phases `before-service-unbind`, `before-cluster-shutdown` and `before-actor-system-terminate` are intended for application specific phases or tasks. -The order of the shutdown phases is defined in configuration `akka.coordinated-shutdown.phases`. 
See the default phases in the `reference.conf` tab: +The order of the shutdown phases is defined in configuration `pekko.coordinated-shutdown.phases`. See the default phases in the `reference.conf` tab: Most relevant default phases : | Phase | Description | @@ -83,7 +83,7 @@ JVM is not forcefully stopped (it will be stopped if all non-daemon threads have To enable a hard `System.exit` as a final action you can configure: ``` -akka.coordinated-shutdown.exit-jvm = on +pekko.coordinated-shutdown.exit-jvm = on ``` The coordinated shutdown process is also started once the actor system's root actor is stopped. @@ -98,7 +98,7 @@ By default, the `CoordinatedShutdown` will be run when the JVM process exits, e. via `kill SIGTERM` signal (`SIGINT` ctrl-c doesn't work). This behavior can be disabled with: ``` -akka.coordinated-shutdown.run-by-jvm-shutdown-hook=off +pekko.coordinated-shutdown.run-by-jvm-shutdown-hook=off ``` If you have application specific JVM shutdown hooks it's recommended that you register them via the @@ -117,8 +117,8 @@ used in the test: ``` # Don't terminate ActorSystem via CoordinatedShutdown in tests -akka.coordinated-shutdown.terminate-actor-system = off -akka.coordinated-shutdown.run-by-actor-system-terminate = off -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.cluster.run-coordinated-shutdown-when-down = off +pekko.coordinated-shutdown.terminate-actor-system = off +pekko.coordinated-shutdown.run-by-actor-system-terminate = off +pekko.coordinated-shutdown.run-by-jvm-shutdown-hook = off +pekko.cluster.run-coordinated-shutdown-when-down = off ``` diff --git a/docs/src/main/paradox/coordination.md b/docs/src/main/paradox/coordination.md index 456fd81b84..59e6550731 100644 --- a/docs/src/main/paradox/coordination.md +++ b/docs/src/main/paradox/coordination.md @@ -10,7 +10,7 @@ Akka Coordination is a set of tools for distributed coordination. 
@@dependency[sbt,Gradle,Maven] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-coordination_$scala.binary.version$" version=AkkaVersion @@ -105,7 +105,7 @@ If a user prefers to have outside intervention in this case for maximum safety t The configuration must define the `lease-class` property for the FQCN of the lease implementation. -The lease implementation should have support for the following properties where the defaults come from `akka.coordination.lease`: +The lease implementation should have support for the following properties where the defaults come from `pekko.coordination.lease`: @@snip [reference.conf](/akka-coordination/src/main/resources/reference.conf) { #defaults } diff --git a/docs/src/main/paradox/discovery/index.md b/docs/src/main/paradox/discovery/index.md index 24ec1445d3..7ad68d7150 100644 --- a/docs/src/main/paradox/discovery/index.md +++ b/docs/src/main/paradox/discovery/index.md @@ -36,7 +36,7 @@ See @ref:[Migration hints](#migrating-from-akka-management-discovery-before-1-0- @@dependency[sbt,Gradle,Maven] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-discovery_$scala.binary.version$" version=AkkaVersion @@ -78,7 +78,7 @@ Port can be used when a service opens multiple ports e.g. a HTTP port and an Akk @@@ note { title="Async DNS" } -Akka Discovery with DNS does always use the @ref[Akka-native "async-dns" implementation](../io-dns.md) (it is independent of the `akka.io.dns.resolver` setting). +Akka Discovery with DNS does always use the @ref[Akka-native "async-dns" implementation](../io-dns.md) (it is independent of the `pekko.io.dns.resolver` setting). 
@@@ @@ -93,7 +93,7 @@ The mapping between Akka service discovery terminology and SRV terminology: * SRV name = serviceName * SRV protocol = protocol -Configure `akka-dns` to be used as the discovery implementation in your `application.conf`: +Configure `pekko-dns` to be used as the discovery implementation in your `application.conf`: @@snip[application.conf](/docs/src/test/scala/docs/discovery/DnsDiscoveryDocSpec.scala){ #configure-dns } @@ -115,9 +115,9 @@ The advantage of SRV records is that they can include a port. Lookups with all the fields set become SRV queries. For example: ``` -dig srv _service._tcp.akka.test +dig srv _service._tcp.pekko.test -; <<>> DiG 9.11.3-RedHat-9.11.3-6.fc28 <<>> srv service.tcp.akka.test +; <<>> DiG 9.11.3-RedHat-9.11.3-6.fc28 <<>> srv service.tcp.pekko.test ;; global options: +cmd ;; Got answer: ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 60023 @@ -127,25 +127,25 @@ dig srv _service._tcp.akka.test ; EDNS: version: 0, flags:; udp: 4096 ; COOKIE: 5ab8dd4622e632f6190f54de5b28bb8fb1b930a5333c3862 (good) ;; QUESTION SECTION: -;service.tcp.akka.test. IN SRV +;service.tcp.pekko.test. IN SRV ;; ANSWER SECTION: -_service._tcp.akka.test. 86400 IN SRV 10 60 5060 a-single.akka.test. -_service._tcp.akka.test. 86400 IN SRV 10 40 5070 a-double.akka.test. +_service._tcp.pekko.test. 86400 IN SRV 10 60 5060 a-single.pekko.test. +_service._tcp.pekko.test. 86400 IN SRV 10 40 5070 a-double.pekko.test. ``` -In this case `service.tcp.akka.test` resolves to `a-single.akka.test` on port `5060` -and `a-double.akka.test` on port `5070`. Currently discovery does not support the weightings. +In this case `service.tcp.pekko.test` resolves to `a-single.pekko.test` on port `5060` +and `a-double.pekko.test` on port `5070`. Currently discovery does not support the weightings. #### A/AAAA records Lookups with any fields missing become A/AAAA record queries. 
For example: ``` -dig a-double.akka.test +dig a-double.pekko.test -; <<>> DiG 9.11.3-RedHat-9.11.3-6.fc28 <<>> a-double.akka.test +; <<>> DiG 9.11.3-RedHat-9.11.3-6.fc28 <<>> a-double.pekko.test ;; global options: +cmd ;; Got answer: ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 11983 @@ -155,15 +155,15 @@ dig a-double.akka.test ; EDNS: version: 0, flags:; udp: 4096 ; COOKIE: 16e9815d9ca2514d2f3879265b28bad05ff7b4a82721edd0 (good) ;; QUESTION SECTION: -;a-double.akka.test. IN A +;a-double.pekko.test. IN A ;; ANSWER SECTION: -a-double.akka.test. 86400 IN A 192.168.1.21 -a-double.akka.test. 86400 IN A 192.168.1.22 +a-double.pekko.test. 86400 IN A 192.168.1.21 +a-double.pekko.test. 86400 IN A 192.168.1.22 ``` -In this case `a-double.akka.test` would resolve to `192.168.1.21` and `192.168.1.22`. +In this case `a-double.pekko.test` would resolve to `192.168.1.21` and `192.168.1.22`. ## Discovery Method: Configuration @@ -177,15 +177,15 @@ sophisticated discovery method without any code changes. Configure it to be used as discovery method in your `application.conf` ``` -akka { +pekko { discovery.method = config } ``` -By default the services discoverable are defined in `akka.discovery.config.services` and have the following format: +By default the services discoverable are defined in `pekko.discovery.config.services` and have the following format: ``` -akka.discovery.config.services = { +pekko.discovery.config.services = { service1 = { endpoints = [ { @@ -215,14 +215,14 @@ via DNS and fall back to configuration. To use aggregate discovery add its dependency as well as all of the discovery that you want to aggregate. -Configure `aggregate` as `akka.discovery.method` and which discovery methods are tried and in which order. +Configure `aggregate` as `pekko.discovery.method` and which discovery methods are tried and in which order. 
``` -akka { +pekko { discovery { method = aggregate aggregate { - discovery-methods = ["akka-dns", "config"] + discovery-methods = ["pekko-dns", "config"] } config { services { @@ -245,7 +245,7 @@ akka { ``` -The above configuration will result in `akka-dns` first being checked and if it fails or returns no +The above configuration will result in `pekko-dns` first being checked and if it fails or returns no targets for the given service name then `config` is queried which is configured with one service called `service1` with two hosts `host1` and `host2`. @@ -258,8 +258,8 @@ At least version `1.0.0` of any Akka Management module should be used if also us Migration steps: * Any custom discovery method should now implement `org.apache.pekko.discovery.ServiceDiscovery` -* `discovery-method` now has to be a configuration location under `akka.discovery` with at minimum a property `class` specifying the fully qualified name of the implementation of `org.apache.pekko.discovery.ServiceDiscovery`. - Previous versions allowed this to be a class name or a fully qualified config location e.g. `akka.discovery.kubernetes-api` rather than just `kubernetes-api` +* `discovery-method` now has to be a configuration location under `pekko.discovery` with at minimum a property `class` specifying the fully qualified name of the implementation of `org.apache.pekko.discovery.ServiceDiscovery`. + Previous versions allowed this to be a class name or a fully qualified config location e.g. 
`pekko.discovery.kubernetes-api` rather than just `kubernetes-api` diff --git a/docs/src/main/paradox/dispatchers.md b/docs/src/main/paradox/dispatchers.md index 0b6cc7f51f..4c5676c5bd 100644 --- a/docs/src/main/paradox/dispatchers.md +++ b/docs/src/main/paradox/dispatchers.md @@ -10,7 +10,7 @@ Dispatchers are part of core Akka, which means that they are part of the akka-ac @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/distributed-data.md b/docs/src/main/paradox/distributed-data.md index 4ca36d7a23..91c65260bb 100644 --- a/docs/src/main/paradox/distributed-data.md +++ b/docs/src/main/paradox/distributed-data.md @@ -10,7 +10,7 @@ To use Akka Distributed Data, you must add the following dependency in your proj @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-distributed-data_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/distributed-pub-sub.md b/docs/src/main/paradox/distributed-pub-sub.md index 5c86b5a92f..89e1de5423 100644 --- a/docs/src/main/paradox/distributed-pub-sub.md +++ b/docs/src/main/paradox/distributed-pub-sub.md @@ -10,7 +10,7 @@ To use Distributed Publish Subscribe you must add the following dependency in yo @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-cluster-tools_$scala.binary.version$" version=AkkaVersion @@ -224,11 +224,11 @@ The 
`DistributedPubSub` extension can be configured with the following propertie @@snip [reference.conf](/akka-cluster-tools/src/main/resources/reference.conf) { #pub-sub-ext-config } It is recommended to load the extension when the actor system is started by defining it in -`akka.extensions` configuration property. Otherwise it will be activated when first used +`pekko.extensions` configuration property. Otherwise it will be activated when first used and then it takes a while for it to be populated. ``` -akka.extensions = ["org.apache.pekko.cluster.pubsub.DistributedPubSub"] +pekko.extensions = ["org.apache.pekko.cluster.pubsub.DistributedPubSub"] ``` ## Delivery Guarantee diff --git a/docs/src/main/paradox/durable-state/persistence-query.md b/docs/src/main/paradox/durable-state/persistence-query.md index f33d3d0bbb..cc69070df9 100644 --- a/docs/src/main/paradox/durable-state/persistence-query.md +++ b/docs/src/main/paradox/durable-state/persistence-query.md @@ -10,7 +10,7 @@ To use Persistence Query, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-query_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/event-bus.md b/docs/src/main/paradox/event-bus.md index 40200391a4..ef06e7ba50 100644 --- a/docs/src/main/paradox/event-bus.md +++ b/docs/src/main/paradox/event-bus.md @@ -206,7 +206,7 @@ stream for logging: these are the handlers which are configured for example in `application.conf`: ```text -akka { +pekko { loggers = ["org.apache.pekko.event.Logging$DefaultLogger"] } ``` diff --git a/docs/src/main/paradox/extending-akka.md b/docs/src/main/paradox/extending-akka.md index 52b5605c79..38aad46908 100644 --- a/docs/src/main/paradox/extending-akka.md +++ 
b/docs/src/main/paradox/extending-akka.md @@ -67,7 +67,7 @@ That's all there is to it! ## Loading from Configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of either @apidoc[ExtensionId](actor.ExtensionId) or @apidoc[ExtensionIdProvider](ExtensionIdProvider) -in the `akka.extensions` section of the config you provide to your @apidoc[ActorSystem](actor.ActorSystem). +in the `pekko.extensions` section of the config you provide to your @apidoc[ActorSystem](actor.ActorSystem). Scala : @@snip [ExtensionDocSpec.scala](/docs/src/test/scala/docs/extension/ExtensionDocSpec.scala) { #config } @@ -75,7 +75,7 @@ Scala Java : @@@vars ``` - akka { + pekko { extensions = ["docs.extension.ExtensionDocTest.CountExtension"] } ``` @@ -114,10 +114,10 @@ Java ## Library extensions A third part library may register its extension for auto-loading on actor system startup by appending it to -`akka.library-extensions` in its `reference.conf`. +`pekko.library-extensions` in its `reference.conf`. ``` -akka.library-extensions += "docs.extension.ExampleExtension" +pekko.library-extensions += "docs.extension.ExampleExtension" ``` As there is no way to selectively remove such extensions, it should be used with care and only when there is no case @@ -126,7 +126,7 @@ this could be important is in tests. @@@ warning -The``akka.library-extensions`` must never be assigned (`= ["Extension"]`) instead of appending as this will break +The``pekko.library-extensions`` must never be assigned (`= ["Extension"]`) instead of appending as this will break the library-extension mechanism and make behavior depend on class path ordering. 
@@@ diff --git a/docs/src/main/paradox/fault-tolerance.md b/docs/src/main/paradox/fault-tolerance.md index 1afcd0f6a0..578eb778f3 100644 --- a/docs/src/main/paradox/fault-tolerance.md +++ b/docs/src/main/paradox/fault-tolerance.md @@ -10,7 +10,7 @@ The concept of fault tolerance relates to actors, so in order to use these make @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -313,7 +313,7 @@ The `org.apache.pekko.pattern.BackoffOnFailureOptions` and `org.apache.pekko.pat Options are: * `withAutoReset`: The backoff is reset if no failure/stop occurs within the duration. This is the default behaviour with `minBackoff` as default value * `withManualReset`: The child must send `BackoffSupervisor.Reset` to its backoff supervisor (parent) -* `withSupervisionStrategy`: Sets a custom `OneForOneStrategy` (as each backoff supervisor only has one child). The default strategy uses the `akka.actor.SupervisorStrategy.defaultDecider` which stops and starts the child on exceptions. +* `withSupervisionStrategy`: Sets a custom `OneForOneStrategy` (as each backoff supervisor only has one child). The default strategy uses the `pekko.actor.SupervisorStrategy.defaultDecider` which stops and starts the child on exceptions. * `withMaxNrOfRetries`: Sets the maximum number of retries until the supervisor will give up (`-1` is default which means no limit of retries). Note: This is set on the supervision strategy, so setting a different strategy resets the `maxNrOfRetries`. * `withReplyWhileStopped`: By default all messages received while the child is stopped are forwarded to dead letters. With this set, the supervisor will reply to the sender instead. 
diff --git a/docs/src/main/paradox/fsm.md b/docs/src/main/paradox/fsm.md index 2d0cd5dbdc..04856b0dc3 100644 --- a/docs/src/main/paradox/fsm.md +++ b/docs/src/main/paradox/fsm.md @@ -10,7 +10,7 @@ To use Finite State Machine actors, you must add the following dependency in you @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -541,7 +541,7 @@ and in the following. ### Event Tracing -The setting `akka.actor.debug.fsm` in @ref:[configuration](general/configuration.md) enables logging of an +The setting `pekko.actor.debug.fsm` in @ref:[configuration](general/configuration.md) enables logging of an event trace by `LoggingFSM` instances: Scala diff --git a/docs/src/main/paradox/futures.md b/docs/src/main/paradox/futures.md index 27ef62b161..79065effef 100644 --- a/docs/src/main/paradox/futures.md +++ b/docs/src/main/paradox/futures.md @@ -7,7 +7,7 @@ Akka offers tiny helpers for use with @scala[@scaladoc[Future](scala.concurrent. @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/general/configuration.md b/docs/src/main/paradox/general/configuration.md index 136a9998af..2e58b2238b 100644 --- a/docs/src/main/paradox/general/configuration.md +++ b/docs/src/main/paradox/general/configuration.md @@ -75,7 +75,7 @@ A custom `application.conf` might look like this: # In this file you can override any option defined in the reference files. # Copy in parts of the reference files and modify as you please. 
-akka { +pekko { # Logger config for Akka internals and classic actors, the new API relies # directly on SLF4J and your config for the logger backend. @@ -126,7 +126,7 @@ Specifying system property with `-Dconfig.resource=/dev.conf` will load the `dev ``` include "application" -akka { +pekko { loglevel = "DEBUG" } ``` @@ -137,7 +137,7 @@ specification. ## Logging of Configuration -If the system or config property `akka.log-config-on-start` is set to `on`, then the +If the system or config property `pekko.log-config-on-start` is set to `on`, then the complete configuration is logged at INFO level when the actor system is started. This is useful when you are uncertain of what configuration is used. @@ -198,7 +198,7 @@ This implies that putting Akka on the boot class path will yield ## Application specific settings The configuration can also be used for application specific settings. -A good practice is to place those settings in an @ref:[Extension](../extending-akka.md#extending-akka-settings). +A good practice is to place those settings in an @ref:[Extension](../extending-akka.md#extending-akka-settings). 
## Configuring multiple ActorSystem @@ -213,11 +213,11 @@ differentiate actor systems within the hierarchy of the configuration: ``` myapp1 { - akka.loglevel = "WARNING" + pekko.loglevel = "WARNING" my.own.setting = 43 } myapp2 { - akka.loglevel = "ERROR" + pekko.loglevel = "ERROR" app2.setting = "appname" } my.own.setting = 42 @@ -235,7 +235,7 @@ trick: in the first case, the configuration accessible from within the actor system is this ```ruby -akka.loglevel = "WARNING" +pekko.loglevel = "WARNING" my.own.setting = 43 my.other.setting = "hello" // plus myapp1 and myapp2 subtrees @@ -245,7 +245,7 @@ while in the second one, only the “akka” subtree is lifted, with the followi result ```ruby -akka.loglevel = "ERROR" +pekko.loglevel = "ERROR" my.own.setting = 42 my.other.setting = "hello" // plus myapp1 and myapp2 subtrees diff --git a/docs/src/main/paradox/index-actors.md b/docs/src/main/paradox/index-actors.md index 9b128623b2..25ed9cbef0 100644 --- a/docs/src/main/paradox/index-actors.md +++ b/docs/src/main/paradox/index-actors.md @@ -9,7 +9,7 @@ To use Classic Akka Actors, you must add the following dependency in your projec @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/index-utilities-classic.md b/docs/src/main/paradox/index-utilities-classic.md index 5834172e46..a9844084f1 100644 --- a/docs/src/main/paradox/index-utilities-classic.md +++ b/docs/src/main/paradox/index-utilities-classic.md @@ -7,7 +7,7 @@ To use Utilities, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" 
group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/io-dns.md b/docs/src/main/paradox/io-dns.md index 3cbe0eaa2a..235e2bef8e 100644 --- a/docs/src/main/paradox/io-dns.md +++ b/docs/src/main/paradox/io-dns.md @@ -36,7 +36,7 @@ block that specifies the implementation via `provider-object`. @@@ -To select which `DnsProvider` to use set `akka.io.dns.resolver ` to the location of the configuration. +To select which `DnsProvider` to use set `pekko.io.dns.resolver ` to the location of the configuration. There are currently two implementations: @@ -83,7 +83,7 @@ The Async DNS provider has the following advantages: ## SRV Records -To get DNS SRV records `akka.io.dns.resolver` must be set to `async-dns` and `DnsProtocol.Resolve`'s requestType +To get DNS SRV records `pekko.io.dns.resolver` must be set to `async-dns` and `DnsProtocol.Resolve`'s requestType must be set to `DnsProtocol.Srv` Scala diff --git a/docs/src/main/paradox/io-tcp.md b/docs/src/main/paradox/io-tcp.md index 0ae42ce2a7..00d560352d 100644 --- a/docs/src/main/paradox/io-tcp.md +++ b/docs/src/main/paradox/io-tcp.md @@ -10,7 +10,7 @@ To use TCP, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/io-udp.md b/docs/src/main/paradox/io-udp.md index f69ab54da1..4f05b48b63 100644 --- a/docs/src/main/paradox/io-udp.md +++ b/docs/src/main/paradox/io-udp.md @@ -10,7 +10,7 @@ To use UDP, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - 
value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/io.md b/docs/src/main/paradox/io.md index ffc15da378..dd70118dc4 100644 --- a/docs/src/main/paradox/io.md +++ b/docs/src/main/paradox/io.md @@ -7,7 +7,7 @@ To use I/O, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -15,7 +15,7 @@ To use I/O, you must add the following dependency in your project: ## Introduction -The `akka.io` package has been developed in collaboration between the Akka +The `pekko.io` package has been developed in collaboration between the Akka and [spray.io](http://spray.io) teams. Its design combines experiences from the `spray-io` module with improvements that were jointly developed for more general consumption as an actor-based service. @@ -113,7 +113,7 @@ result in copying all bytes in that slice. #### Compatibility with java.io -A @apidoc[ByteStringBuilder](util.ByteStringBuilder) can be wrapped in a @javadoc[java.io.OutputStream](java.io.OutputStream) via the @apidoc[asOutputStream](util.ByteStringBuilder) {scala="#asOutputStream:java.io.OutputStream" java="#asOutputStream()"} method. Likewise, @apidoc[ByteIterator](util.ByteIterator) can be wrapped in a @javadoc[java.io.InputStream](java.io.InputStream) via @apidoc[asInputStream](util.ByteIterator) {scala="#asInputStream:java.io.InputStream" java="#asInputStream()"}. Using these, `akka.io` applications can integrate legacy code based on `java.io` streams. 
+A @apidoc[ByteStringBuilder](util.ByteStringBuilder) can be wrapped in a @javadoc[java.io.OutputStream](java.io.OutputStream) via the @apidoc[asOutputStream](util.ByteStringBuilder) {scala="#asOutputStream:java.io.OutputStream" java="#asOutputStream()"} method. Likewise, @apidoc[ByteIterator](util.ByteIterator) can be wrapped in a @javadoc[java.io.InputStream](java.io.InputStream) via @apidoc[asInputStream](util.ByteIterator) {scala="#asInputStream:java.io.InputStream" java="#asInputStream()"}. Using these, `pekko.io` applications can integrate legacy code based on `java.io` streams. ## Architecture in-depth diff --git a/docs/src/main/paradox/logging.md b/docs/src/main/paradox/logging.md index 5bce2362b2..b513a6eddf 100644 --- a/docs/src/main/paradox/logging.md +++ b/docs/src/main/paradox/logging.md @@ -10,7 +10,7 @@ To use Logging, you must at least use the Akka actors dependency in your project @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -98,7 +98,7 @@ messages in the actor mailboxes are sent to dead letters. You can also disable l of dead letters during shutdown. ```ruby -akka { +pekko { log-dead-letters = 10 log-dead-letters-during-shutdown = on } @@ -114,7 +114,7 @@ Akka has a few configuration options for very low level debugging. These make mo You almost definitely need to have logging set to `DEBUG` to use any of the options below: ```ruby -akka { +pekko { loglevel = "DEBUG" } ``` @@ -122,7 +122,7 @@ akka { This config option is very good if you want to know what config settings are loaded by Akka: ```ruby -akka { +pekko { # Log the complete configuration at INFO level when the actor system is started. # This is useful when you are uncertain of what configuration is used. 
log-config-on-start = on @@ -135,7 +135,7 @@ If you want very detailed logging of user-level messages then wrap your actors' @scaladoc[LoggingReceive](pekko.event.LoggingReceive) and enable the `receive` option: ```ruby -akka { +pekko { actor { debug { # enable function of LoggingReceive, which is to log any received message at @@ -152,7 +152,7 @@ If you want very detailed logging of all automatically received messages that ar by Actors: ```ruby -akka { +pekko { actor { debug { # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.) @@ -165,7 +165,7 @@ akka { If you want very detailed logging of all lifecycle changes of Actors (restarts, deaths etc.): ```ruby -akka { +pekko { actor { debug { # enable DEBUG logging of actor lifecycle changes @@ -178,7 +178,7 @@ akka { If you want unhandled messages logged at `DEBUG`: ```ruby -akka { +pekko { actor { debug { # enable DEBUG logging of unhandled messages @@ -191,7 +191,7 @@ akka { If you want very detailed logging of all events, transitions and timers of FSM Actors that extend LoggingFSM: ```ruby -akka { +pekko { actor { debug { # enable DEBUG logging of all LoggingFSMs for events, transitions and timers @@ -204,7 +204,7 @@ akka { If you want to monitor subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream: ```ruby -akka { +pekko { actor { debug { # enable DEBUG logging of subscription changes on the eventStream @@ -220,7 +220,7 @@ akka { If you want to see all messages that are sent through remoting at `DEBUG` log level, use the following config option. Note that this logs the messages as they are sent by the transport layer, not by an actor. ```ruby -akka.remote.artery { +pekko.remote.artery { # If this is "on", Akka will log all outbound messages at DEBUG level, # if off then they are not logged log-sent-messages = on @@ -230,7 +230,7 @@ akka.remote.artery { If you want to see all messages that are received through remoting at `DEBUG` log level, use the following config option. 
Note that this logs the messages as they are received by the transport layer, not by an actor. ```ruby -akka.remote.artery { +pekko.remote.artery { # If this is "on", Akka will log all inbound messages at DEBUG level, # if off then they are not logged log-received-messages = on @@ -277,7 +277,7 @@ might want to do this also in case you implement your own logging adapter. To turn off logging you can configure the log levels to be `OFF` like this. ```ruby -akka { +pekko { stdout-loglevel = "OFF" loglevel = "OFF" } @@ -306,7 +306,7 @@ can be implemented in a custom @apidoc[LoggingFilter], which can be defined in t configuration property. ```ruby -akka { +pekko { # Loggers to register at boot time (org.apache.pekko.event.Logging$DefaultLogger logs # to STDOUT) loggers = ["org.apache.pekko.event.Logging$DefaultLogger"] @@ -340,7 +340,7 @@ Java When the actor system is starting up and shutting down the configured `loggers` are not used. Instead log messages are printed to stdout (System.out). The default log level for this stdout logger is `WARNING` and it can be silenced completely by setting -`akka.stdout-loglevel=OFF`. +`pekko.stdout-loglevel=OFF`. ## SLF4J @@ -350,7 +350,7 @@ It has a single dependency: the slf4j-api jar. In your runtime, you also need a @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-slf4j_$scala.binary.version$" version=AkkaVersion @@ -372,13 +372,13 @@ If you set the `loglevel` to a higher level than `DEBUG`, any `DEBUG` events wil out already at the source and will never reach the logging backend, regardless of how the backend is configured. 
-You can enable `DEBUG` level for `akka.loglevel` and control the actual level in the SLF4J backend +You can enable `DEBUG` level for `pekko.loglevel` and control the actual level in the SLF4J backend without any significant overhead, also for production. @@@ ```ruby -akka { +pekko { loggers = ["org.apache.pekko.event.slf4j.Slf4jLogger"] loglevel = "DEBUG" logging-filter = "org.apache.pekko.event.slf4j.Slf4jLoggingFilter" diff --git a/docs/src/main/paradox/mailboxes.md b/docs/src/main/paradox/mailboxes.md index 4409647739..99adfe2fec 100644 --- a/docs/src/main/paradox/mailboxes.md +++ b/docs/src/main/paradox/mailboxes.md @@ -10,7 +10,7 @@ To use Mailboxes, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -38,7 +38,7 @@ it can be used as the default mailbox, but it cannot be used with a BalancingDis Configuration of `SingleConsumerOnlyUnboundedMailbox` as default mailbox: ``` -akka.actor.default-mailbox { +pekko.actor.default-mailbox { mailbox-type = "org.apache.pekko.dispatch.SingleConsumerOnlyUnboundedMailbox" } ``` @@ -107,7 +107,7 @@ that fails then the dispatcher's requirement—if any—will be tried instead. 5. If the dispatcher requires a mailbox type as described above then the mapping for that requirement will be used to determine the mailbox type to be used. - 6. The default mailbox `akka.actor.default-mailbox` will be used. + 6. The default mailbox `pekko.actor.default-mailbox` will be used. 
## Mailbox configuration examples diff --git a/docs/src/main/paradox/multi-jvm-testing.md b/docs/src/main/paradox/multi-jvm-testing.md index 5748f2fe84..253a2a7513 100644 --- a/docs/src/main/paradox/multi-jvm-testing.md +++ b/docs/src/main/paradox/multi-jvm-testing.md @@ -149,26 +149,26 @@ You can define specific JVM options for each of the spawned JVMs. You do that by a file named after the node in the test with suffix `.opts` and put them in the same directory as the test. -For example, to feed the JVM options `-Dakka.remote.port=9991` and `-Xmx256m` to the `SampleMultiJvmNode1` +For example, to feed the JVM options `-Dpekko.remote.port=9991` and `-Xmx256m` to the `SampleMultiJvmNode1` let's create three `*.opts` files and add the options to them. Separate multiple options with space. `SampleMultiJvmNode1.opts`: ``` --Dakka.remote.port=9991 -Xmx256m +-Dpekko.remote.port=9991 -Xmx256m ``` `SampleMultiJvmNode2.opts`: ``` --Dakka.remote.port=9992 -Xmx256m +-Dpekko.remote.port=9992 -Xmx256m ``` `SampleMultiJvmNode3.opts`: ``` --Dakka.remote.port=9993 -Xmx256m +-Dpekko.remote.port=9993 -Xmx256m ``` ## ScalaTest diff --git a/docs/src/main/paradox/multi-node-testing.md b/docs/src/main/paradox/multi-node-testing.md index 2fe5db09d9..9e55965791 100644 --- a/docs/src/main/paradox/multi-node-testing.md +++ b/docs/src/main/paradox/multi-node-testing.md @@ -10,7 +10,7 @@ To use Multi Node Testing, you must add the following dependency in your project @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-multi-node-testkit_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/persistence-fsm.md b/docs/src/main/paradox/persistence-fsm.md index 79dbd0ed37..8f7eab3caf 100644 --- a/docs/src/main/paradox/persistence-fsm.md +++ b/docs/src/main/paradox/persistence-fsm.md 
@@ -9,7 +9,7 @@ Persistent FSMs are part of Akka persistence, you must add the following depende @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-persistence_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/persistence-journals.md b/docs/src/main/paradox/persistence-journals.md index 93cb9da15a..e5538eb284 100644 --- a/docs/src/main/paradox/persistence-journals.md +++ b/docs/src/main/paradox/persistence-journals.md @@ -59,7 +59,7 @@ The plugin section of the actor system's config will be passed in the config con of the plugin is passed in the `String` parameter. The `plugin-dispatcher` is the dispatcher used for the plugin actor. If not specified, it defaults to -`akka.persistence.dispatchers.default-plugin-dispatcher`. +`pekko.persistence.dispatchers.default-plugin-dispatcher`. Don't run journal tasks/futures on the system default dispatcher, since that might starve other tasks. @@ -91,7 +91,7 @@ The plugin section of the actor system's config will be passed in the config con of the plugin is passed in the `String` parameter. The `plugin-dispatcher` is the dispatcher used for the plugin actor. If not specified, it defaults to -`akka.persistence.dispatchers.default-plugin-dispatcher`. +`pekko.persistence.dispatchers.default-plugin-dispatcher`. Don't run snapshot store tasks/futures on the system default dispatcher, since that might starve other tasks. @@ -104,7 +104,7 @@ The TCK is usable from Java as well as Scala projects. 
To test your implementati @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-persistence-tck_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/persistence-plugins.md b/docs/src/main/paradox/persistence-plugins.md index cee910f9aa..222003d40d 100644 --- a/docs/src/main/paradox/persistence-plugins.md +++ b/docs/src/main/paradox/persistence-plugins.md @@ -18,9 +18,9 @@ When a persistent actor does NOT override the `journalPluginId` and `snapshotPlu the persistence extension will use the "default" journal, snapshot-store and durable-state plugins configured in `reference.conf`: ``` -akka.persistence.journal.plugin = "" -akka.persistence.snapshot-store.plugin = "" -akka.persistence.state.plugin = "" +pekko.persistence.journal.plugin = "" +pekko.persistence.snapshot-store.plugin = "" +pekko.persistence.state.plugin = "" ``` However, these entries are provided as empty "", and require explicit user configuration via override in the user `application.conf`. @@ -33,25 +33,25 @@ However, these entries are provided as empty "", and require explicit user confi By default, persistence plugins are started on-demand, as they are used. In some case, however, it might be beneficial to start a certain plugin eagerly. In order to do that, you should first add `org.apache.pekko.persistence.Persistence` -under the `akka.extensions` key. Then, specify the IDs of plugins you wish to start automatically under -`akka.persistence.journal.auto-start-journals` and `akka.persistence.snapshot-store.auto-start-snapshot-stores`. +under the `pekko.extensions` key. Then, specify the IDs of plugins you wish to start automatically under +`pekko.persistence.journal.auto-start-journals` and `pekko.persistence.snapshot-store.auto-start-snapshot-stores`. 
For example, if you want eager initialization for the leveldb journal plugin and the local snapshot store plugin, your configuration should look like this: ``` -akka { +pekko { extensions = [org.apache.pekko.persistence.Persistence] persistence { journal { - plugin = "akka.persistence.journal.leveldb" + plugin = "pekko.persistence.journal.leveldb" auto-start-journals = ["org.apache.pekko.persistence.journal.leveldb"] } snapshot-store { - plugin = "akka.persistence.snapshot-store.local" + plugin = "pekko.persistence.snapshot-store.local" auto-start-snapshot-stores = ["org.apache.pekko.persistence.snapshot-store.local"] } @@ -76,7 +76,7 @@ The LevelDB plugin cannot be used in an Akka Cluster since the storage is in a l The LevelDB journal is deprecated and it is not advised to build new applications with it. As a replacement we recommend using [Akka Persistence JDBC](https://doc.akka.io/docs/akka-persistence-jdbc/current/index.html). -The LevelDB journal plugin config entry is `akka.persistence.journal.leveldb`. Enable this plugin by +The LevelDB journal plugin config entry is `pekko.persistence.journal.leveldb`. Enable this plugin by defining config property: @@snip [PersistencePluginDocSpec.scala](/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-plugin-config } @@ -125,7 +125,7 @@ working directory. The storage location can be changed by configuration: @@snip [PersistencePluginDocSpec.scala](/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-store-config } -Actor systems that use a shared LevelDB store must activate the `akka.persistence.journal.leveldb-shared` +Actor systems that use a shared LevelDB store must activate the `pekko.persistence.journal.leveldb-shared` plugin. @@snip [PersistencePluginDocSpec.scala](/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #shared-journal-config } @@ -150,7 +150,7 @@ This plugin writes snapshot files to the local filesystem. 
The local snapshot store plugin cannot be used in an Akka Cluster since the storage is in a local file system. @@@ -The local snapshot store plugin config entry is `akka.persistence.snapshot-store.local`. +The local snapshot store plugin config entry is `pekko.persistence.snapshot-store.local`. Enable this plugin by defining config property: @@snip [PersistencePluginDocSpec.scala](/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala) { #leveldb-snapshot-plugin-config } @@ -176,10 +176,10 @@ A shared journal/snapshot store is a single point of failure and should only be purposes. @@@ -The journal and snapshot store proxies are controlled via the `akka.persistence.journal.proxy` and -`akka.persistence.snapshot-store.proxy` configuration entries, respectively. Set the `target-journal-plugin` or +The journal and snapshot store proxies are controlled via the `pekko.persistence.journal.proxy` and +`pekko.persistence.snapshot-store.proxy` configuration entries, respectively. Set the `target-journal-plugin` or `target-snapshot-store-plugin` keys to the underlying plugin you wish to use (for example: -`akka.persistence.journal.inmem`). The `start-target-journal` and `start-target-snapshot-store` keys should be +`pekko.persistence.journal.inmem`). The `start-target-journal` and `start-target-snapshot-store` keys should be set to `on` in exactly one actor system - this is the system that will instantiate the shared persistence plugin. Next, the proxy needs to be told how to find the shared plugin. 
This can be done by setting the `target-journal-address` and `target-snapshot-store-address` configuration keys, or programmatically by calling the diff --git a/docs/src/main/paradox/persistence-query-leveldb.md b/docs/src/main/paradox/persistence-query-leveldb.md index 34a869a7d0..8302c1728c 100644 --- a/docs/src/main/paradox/persistence-query-leveldb.md +++ b/docs/src/main/paradox/persistence-query-leveldb.md @@ -10,7 +10,7 @@ To use Persistence Query, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-query_$scala.binary.version$ version=AkkaVersion @@ -154,7 +154,7 @@ backend journal. ## Configuration Configuration settings can be defined in the configuration section with the -absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"` +absolute path corresponding to the identifier, which is `"pekko.persistence.query.journal.leveldb"` for the default `LeveldbReadJournal.Identifier`. 
It can be configured with the following properties: diff --git a/docs/src/main/paradox/persistence-query.md b/docs/src/main/paradox/persistence-query.md index b4d7ba65a0..550a8d9137 100644 --- a/docs/src/main/paradox/persistence-query.md +++ b/docs/src/main/paradox/persistence-query.md @@ -10,7 +10,7 @@ To use Persistence Query, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-query_$scala.binary.version$ version=AkkaVersion @@ -53,7 +53,7 @@ query types for the most common query scenarios, that most journals are likely t In order to issue queries one has to first obtain an instance of a @apidoc[query.*.ReadJournal]. Read journals are implemented as [Community plugins](https://akka.io/community/#plugins-to-akka-persistence-query), each targeting a specific datastore (for example Cassandra or JDBC -databases). For example, given a library that provides a `akka.persistence.query.my-read-journal` obtaining the related +databases). 
For example, given a library that provides a `pekko.persistence.query.my-read-journal` obtaining the related journal is as simple as: Scala diff --git a/docs/src/main/paradox/persistence-schema-evolution.md b/docs/src/main/paradox/persistence-schema-evolution.md index 9c1743a0a2..06fe7dd126 100644 --- a/docs/src/main/paradox/persistence-schema-evolution.md +++ b/docs/src/main/paradox/persistence-schema-evolution.md @@ -7,7 +7,7 @@ This documentation page touches upon @ref[Akka Persistence](persistence.md), so @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-persistence_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/persistence.md b/docs/src/main/paradox/persistence.md index f0c4933289..c278aab1a4 100644 --- a/docs/src/main/paradox/persistence.md +++ b/docs/src/main/paradox/persistence.md @@ -13,7 +13,7 @@ To use Akka Persistence, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-persistence_$scala.binary.version$" version=AkkaVersion @@ -134,7 +134,7 @@ to not overload the system and the backend data store. When exceeding the limit until other recoveries have been completed. This is configured by: ``` -akka.persistence.max-concurrent-recoveries = 50 +pekko.persistence.max-concurrent-recoveries = 50 ``` @@@ note @@ -220,7 +220,7 @@ of stashed messages will grow without bounds. 
It can be wise to protect against maximum stash capacity in the mailbox configuration: ``` -akka.actor.default-mailbox.stash-capacity=10000 +pekko.actor.default-mailbox.stash-capacity=10000 ``` Note that the stash capacity is per actor. If you have many persistent actors, e.g. when using cluster sharding, @@ -234,7 +234,7 @@ for all persistent actors by providing FQCN, which must be a subclass of @apidoc persistence configuration: ``` -akka.persistence.internal-stash-overflow-strategy= +pekko.persistence.internal-stash-overflow-strategy= "org.apache.pekko.persistence.ThrowExceptionConfigurator" ``` @@ -604,7 +604,7 @@ saved snapshot matches the specified `SnapshotSelectionCriteria` will replay all @@@ note -In order to use snapshots, a default snapshot-store (`akka.persistence.snapshot-store.plugin`) must be configured, +In order to use snapshots, a default snapshot-store (`pekko.persistence.snapshot-store.plugin`) must be configured, or the @scala[`PersistentActor`]@java[persistent actor] can pick a snapshot store explicitly by overriding @scala[`def snapshotPluginId: String`]@java[`String snapshotPluginId()`]. Because some use cases may not benefit from or need snapshots, it is perfectly valid not to not configure a snapshot store. @@ -742,28 +742,28 @@ serialization mechanism. It is easiest to include the bytes of the `AtLeastOnceD as a blob in your custom snapshot. The interval between redelivery attempts is defined by the @apidoc[redeliverInterval](persistence.AtLeastOnceDeliveryLike) {scala="#redeliverInterval:scala.concurrent.duration.FiniteDuration" java="#redeliverInterval()"} method. -The default value can be configured with the `akka.persistence.at-least-once-delivery.redeliver-interval` +The default value can be configured with the `pekko.persistence.at-least-once-delivery.redeliver-interval` configuration key. The method can be overridden by implementation classes to return non-default values. 
The maximum number of messages that will be sent at each redelivery burst is defined by the @apidoc[redeliverBurstLimit](persistence.AtLeastOnceDeliveryLike) {scala="#redeliveryBurstLimit:Int" java="#redeliveryBurstLimit()"} method (burst frequency is half of the redelivery interval). If there's a lot of unconfirmed messages (e.g. if the destination is not available for a long time), this helps to prevent an overwhelming amount of messages to be sent at once. The default value can be configured with the -`akka.persistence.at-least-once-delivery.redelivery-burst-limit` configuration key. The method can be overridden +`pekko.persistence.at-least-once-delivery.redelivery-burst-limit` configuration key. The method can be overridden by implementation classes to return non-default values. After a number of delivery attempts a @apidoc[persistence.AtLeastOnceDelivery.UnconfirmedWarning] message will be sent to `self`. The re-sending will still continue, but you can choose to call `confirmDelivery` to cancel the re-sending. The number of delivery attempts before emitting the warning is defined by the @apidoc[warnAfterNumberOfUnconfirmedAttempts](persistence.AtLeastOnceDeliveryLike) {scala="#warnAfterNumberOfUnconfirmedAttempts:Int" java="#warnAfterNumberOfUnconfirmedAttempts()"} method. The default value can be -configured with the `akka.persistence.at-least-once-delivery.warn-after-number-of-unconfirmed-attempts` +configured with the `pekko.persistence.at-least-once-delivery.warn-after-number-of-unconfirmed-attempts` configuration key. The method can be overridden by implementation classes to return non-default values. The @scala[@scaladoc[AtLeastOnceDelivery](pekko.persistence.AtLeastOnceDelivery) trait]@java[@javadoc[AbstractPersistentActorWithAtLeastOnceDelivery](pekko.persistence.AbstractPersistentActorWithAtLeastOnceDelivery) class] holds messages in memory until their successful delivery has been confirmed. 
The maximum number of unconfirmed messages that the actor is allowed to hold in memory is defined by the @apidoc[maxUnconfirmedMessages](persistence.AtLeastOnceDeliveryLike) {scala="#maxUnconfirmedMessages:Int" java="#maxUnconfirmedMessages()"} method. If this limit is exceed the `deliver` method will not accept more messages and it will throw @apidoc[AtLeastOnceDelivery.MaxUnconfirmedMessagesExceededException]. -The default value can be configured with the `akka.persistence.at-least-once-delivery.max-unconfirmed-messages` +The default value can be configured with the `pekko.persistence.at-least-once-delivery.max-unconfirmed-messages` configuration key. The method can be overridden by implementation classes to return non-default values. ## Event Adapters diff --git a/docs/src/main/paradox/project/links.md b/docs/src/main/paradox/project/links.md index 8f6e568737..3d29d297ee 100644 --- a/docs/src/main/paradox/project/links.md +++ b/docs/src/main/paradox/project/links.md @@ -13,7 +13,7 @@ It also provides the Lightbend Reactive Platform, which is powered by an open so ## Akka Discuss Forums -[Akka Discuss Forums](https://discuss.akka.io) +[Akka Discuss Forums](https://discuss.pekko.io) ## Gitter diff --git a/docs/src/main/paradox/project/migration-guide-2.5.x-2.6.x.md b/docs/src/main/paradox/project/migration-guide-2.5.x-2.6.x.md index 49821a9390..82d43fc0fa 100644 --- a/docs/src/main/paradox/project/migration-guide-2.5.x-2.6.x.md +++ b/docs/src/main/paradox/project/migration-guide-2.5.x-2.6.x.md @@ -31,7 +31,7 @@ If you are still using Scala 2.11 then you must upgrade to 2.12 or 2.13 Auto-downing of unreachable Cluster members have been removed after warnings and recommendations against using it for many years. It was by default disabled, but could be enabled with configuration -`akka.cluster.auto-down-unreachable-after`. +`pekko.cluster.auto-down-unreachable-after`. For alternatives see the @ref:[documentation about Downing](../typed/cluster.md#downing). 
@@ -122,7 +122,7 @@ After being deprecated since 2.2, the following have been removed in Akka 2.6.0. ### TypedActor `org.apache.pekko.actor.TypedActor` has been deprecated as of 2.6.0 in favor of the -`akka.actor.typed` API which should be used instead. +`pekko.actor.typed` API which should be used instead. There are several reasons for phasing out the old `TypedActor`. The primary reason is they use transparent remoting which is not our recommended way of implementing and interacting with actors. Transparent remoting @@ -225,7 +225,7 @@ misconfiguration. You can run Artery on 2552 if you prefer that (e.g. existing f have to configure the port with: ``` -akka.remote.artery.canonical.port = 2552 +pekko.remote.artery.canonical.port = 2552 ``` The configuration for Artery is different, so you might have to revisit any custom configuration. See the full @@ -234,8 +234,8 @@ The configuration for Artery is different, so you might have to revisit any cust Configuration that is likely required to be ported: -* `akka.remote.netty.tcp.hostname` => `akka.remote.artery.canonical.hostname` -* `akka.remote.netty.tcp.port`=> `akka.remote.artery.canonical.port` +* `pekko.remote.netty.tcp.hostname` => `pekko.remote.artery.canonical.hostname` +* `pekko.remote.netty.tcp.port`=> `pekko.remote.artery.canonical.port` If using SSL then `tcp-tls` needs to be enabled and setup. See @ref[Artery docs for SSL](../remoting-artery.md#configuring-ssl-tls-for-akka-remoting) for how to do this. @@ -250,25 +250,25 @@ The following events that are published to the `eventStream` have changed: The following defaults have changed: -* `akka.remote.artery.transport` default has changed from `aeron-udp` to `tcp` +* `pekko.remote.artery.transport` default has changed from `aeron-udp` to `tcp` The following properties have moved. 
If you don't adjust these from their defaults no changes are required: For Aeron-UDP: -* `akka.remote.artery.log-aeron-counters` to `akka.remote.artery.advanced.aeron.log-aeron-counters` -* `akka.remote.artery.advanced.embedded-media-driver` to `akka.remote.artery.advanced.aeron.embedded-media-driver` -* `akka.remote.artery.advanced.aeron-dir` to `akka.remote.artery.advanced.aeron.aeron-dir` -* `akka.remote.artery.advanced.delete-aeron-dir` to `akka.remote.artery.advanced.aeron.aeron-delete-dir` -* `akka.remote.artery.advanced.idle-cpu-level` to `akka.remote.artery.advanced.aeron.idle-cpu-level` -* `akka.remote.artery.advanced.give-up-message-after` to `akka.remote.artery.advanced.aeron.give-up-message-after` -* `akka.remote.artery.advanced.client-liveness-timeout` to `akka.remote.artery.advanced.aeron.client-liveness-timeout` -* `akka.remote.artery.advanced.image-liveless-timeout` to `akka.remote.artery.advanced.aeron.image-liveness-timeout` -* `akka.remote.artery.advanced.driver-timeout` to `akka.remote.artery.advanced.aeron.driver-timeout` +* `pekko.remote.artery.log-aeron-counters` to `pekko.remote.artery.advanced.aeron.log-aeron-counters` +* `pekko.remote.artery.advanced.embedded-media-driver` to `pekko.remote.artery.advanced.aeron.embedded-media-driver` +* `pekko.remote.artery.advanced.aeron-dir` to `pekko.remote.artery.advanced.aeron.aeron-dir` +* `pekko.remote.artery.advanced.delete-aeron-dir` to `pekko.remote.artery.advanced.aeron.delete-aeron-dir` +* `pekko.remote.artery.advanced.idle-cpu-level` to `pekko.remote.artery.advanced.aeron.idle-cpu-level` +* `pekko.remote.artery.advanced.give-up-message-after` to `pekko.remote.artery.advanced.aeron.give-up-message-after` +* `pekko.remote.artery.advanced.client-liveness-timeout` to `pekko.remote.artery.advanced.aeron.client-liveness-timeout` +* `pekko.remote.artery.advanced.image-liveless-timeout` to `pekko.remote.artery.advanced.aeron.image-liveness-timeout` +* `pekko.remote.artery.advanced.driver-timeout` to 
`pekko.remote.artery.advanced.aeron.driver-timeout` For TCP: -* `akka.remote.artery.advanced.connection-timeout` to `akka.remote.artery.advanced.tcp.connection-timeout` +* `pekko.remote.artery.advanced.connection-timeout` to `pekko.remote.artery.advanced.tcp.connection-timeout` #### Remaining with Classic remoting (not recommended) @@ -278,8 +278,8 @@ not supported so if you want to update from Akka 2.5.x with Classic remoting to down of the Cluster you have to enable Classic remoting. Later, you can plan for a full shutdown and @ref:[migrate from classic remoting to Artery](#migrating-from-classic-remoting-to-artery) as a separate step. -Explicitly disable Artery by setting property `akka.remote.artery.enabled` to `false`. Further, any configuration under `akka.remote` that is -specific to classic remoting needs to be moved to `akka.remote.classic`. To see which configuration options +Explicitly disable Artery by setting property `pekko.remote.artery.enabled` to `false`. Further, any configuration under `pekko.remote` that is +specific to classic remoting needs to be moved to `pekko.remote.classic`. To see which configuration options are specific to classic search for them in: @ref:[`akka-remote/reference.conf`](../general/configuration-reference.md#config-akka-remote). If you have a [Lightbend Subscription](https://www.lightbend.com/lightbend-subscription) you can use our [Config Checker](https://doc.akka.io/docs/akka-enhancements/current/config-checker.html) enhancement to flag any settings that have not been properly migrated. @@ -306,13 +306,13 @@ recommendation if you don't have other preferences or constraints. 
For compatibility with older systems that rely on Java serialization it can be enabled with the following configuration: ```ruby -akka.actor.allow-java-serialization = on +pekko.actor.allow-java-serialization = on ``` Akka will still log warning when Java serialization is used and to silent that you may add: ```ruby -akka.actor.warn-about-java-serializer-usage = off +pekko.actor.warn-about-java-serializer-usage = off ``` ### Rolling update @@ -355,7 +355,7 @@ will log a warning and be ignored, it must be done after the node has joined. To optionally enable a watch without Akka Cluster or across a Cluster boundary between Cluster and non Cluster, knowing the consequences, all watchers (cluster as well as remote) need to set: ``` -akka.remote.use-unsafe-remote-features-outside-cluster = on`. +pekko.remote.use-unsafe-remote-features-outside-cluster = on`. ``` When enabled @@ -363,7 +363,7 @@ When enabled * An initial warning is logged on startup of `RemoteActorRefProvider` * A warning will be logged on remote watch attempts, which you can suppress by setting ``` -akka.remote.warn-unsafe-watch-outside-cluster = off +pekko.remote.warn-unsafe-watch-outside-cluster = off ``` ### Schedule periodically with fixed-delay vs. fixed-rate @@ -391,16 +391,16 @@ To protect the Akka internals against starvation when user code blocks the defau use of blocking APIs from actors) a new internal dispatcher has been added. All of Akka's internal, non-blocking actors now run on the internal dispatcher by default. -The dispatcher can be configured through `akka.actor.internal-dispatcher`. +The dispatcher can be configured through `pekko.actor.internal-dispatcher`. For maximum performance, you might want to use a single shared dispatcher for all non-blocking, asynchronous actors, user actors and Akka internal actors. In that case, you can configure the -`akka.actor.internal-dispatcher` with a string value of `akka.actor.default-dispatcher`. 
+`pekko.actor.internal-dispatcher` with a string value of `pekko.actor.default-dispatcher`. This reinstantiates the behavior from previous Akka versions but also removes the isolation between user and Akka internals. So, use at your own risk! Several `use-dispatcher` configuration settings that previously accepted an empty value to fall back to the default -dispatcher has now gotten an explicit value of `akka.actor.internal-dispatcher` and no longer accept an empty +dispatcher has now gotten an explicit value of `pekko.actor.internal-dispatcher` and no longer accept an empty string as value. If such an empty value is used in your `application.conf` the same result is achieved by simply removing that entry completely and having the default apply. @@ -411,7 +411,7 @@ For more details about configuring dispatchers, see the @ref[Dispatchers](../dis Previously the factor for the default dispatcher was set a bit high (`3.0`) to give some extra threads in case of accidental blocking and protect a bit against starving the internal actors. Since the internal actors are now on a separate dispatcher the default dispatcher has been adjusted down to `1.0` which means the number of threads will be one per core, but at least -`8` and at most `64`. This can be tuned using the individual settings in `akka.actor.default-dispatcher.fork-join-executor`. +`8` and at most `64`. This can be tuned using the individual settings in `pekko.actor.default-dispatcher.fork-join-executor`. ### Mixed version @@ -429,12 +429,12 @@ so it is more likely to timeout if there are nodes restarting, for example when #### Passivate idle entity -The configuration `akka.cluster.sharding.passivate-idle-entity-after` is now enabled by default. +The configuration `pekko.cluster.sharding.passivate-idle-entity-after` is now enabled by default. Sharding will passivate entities when they have not received any messages after this duration. 
To disable passivation you can use configuration: ``` -akka.cluster.sharding.passivate-idle-entity-after = off +pekko.cluster.sharding.passivate-idle-entity-after = off ``` It is always disabled if @ref:[Remembering Entities](../cluster-sharding.md#remembering-entities) is enabled. @@ -442,7 +442,7 @@ It is always disabled if @ref:[Remembering Entities](../cluster-sharding.md#reme #### Cluster Sharding stats A new field has been added to the response of a `ShardRegion.GetClusterShardingStats` command -for any shards per region that may have failed or not responded within the new configurable `akka.cluster.sharding.shard-region-query-timeout`. +for any shards per region that may have failed or not responded within the new configurable `pekko.cluster.sharding.shard-region-query-timeout`. This is described further in @ref:[inspecting sharding state](../cluster-sharding.md#inspecting-cluster-sharding-state). ### Distributed Data @@ -456,8 +456,8 @@ actor messages. The new configuration properties are: ``` -akka.cluster.distributed-data.max-delta-elements = 500 -akka.cluster.distributed-data.delta-crdt.max-delta-size = 50 +pekko.cluster.distributed-data.max-delta-elements = 500 +pekko.cluster.distributed-data.delta-crdt.max-delta-size = 50 ``` #### DataDeleted @@ -483,7 +483,7 @@ If this is not desired behavior, for example in tests, you can disable this feat and then it will behave as in Akka 2.5.x: ``` -akka.coordinated-shutdown.run-by-actor-system-terminate = off +pekko.coordinated-shutdown.run-by-actor-system-terminate = off ``` ### Scheduler not running tasks when shutdown @@ -511,10 +511,10 @@ keeping our own copy, so from Akka 2.6.0 on, the default FJP from the JDK will b ### Logging of dead letters -When the number of dead letters have reached configured `akka.log-dead-letters` value it didn't log -more dead letters in Akka 2.5.x. In Akka 2.6.x the count is reset after configured `akka.log-dead-letters-suspend-duration`. 
+When the number of dead letters have reached configured `pekko.log-dead-letters` value it didn't log +more dead letters in Akka 2.5.x. In Akka 2.6.x the count is reset after configured `pekko.log-dead-letters-suspend-duration`. -`akka.log-dead-letters-during-shutdown` default configuration changed from `on` to `off`. +`pekko.log-dead-letters-during-shutdown` default configuration changed from `on` to `off`. ### Cluster failure detection @@ -524,13 +524,13 @@ The reason is to have better coverage and unreachability information for downing Configuration property: ``` -akka.cluster.monitored-by-nr-of-members = 9 +pekko.cluster.monitored-by-nr-of-members = 9 ``` ### TestKit `expectNoMessage()` without timeout parameter is now using a new configuration property -`akka.test.expect-no-message-default` (short timeout) instead of `remainingOrDefault` (long timeout). +`pekko.test.expect-no-message-default` (short timeout) instead of `remainingOrDefault` (long timeout). ### Config library resolution change @@ -542,34 +542,34 @@ For example, the default config for Cluster Sharding, refers to the default conf `reference.conf` like this: ```ruby -akka.cluster.sharding.distributed-data = ${akka.cluster.distributed-data} +pekko.cluster.sharding.distributed-data = ${pekko.cluster.distributed-data} ``` In Akka 2.5.x this meant that to override default gossip interval for both direct use of Distributed Data and Cluster Sharding in the same application you would have to change two settings: ```ruby -akka.cluster.distributed-data.gossip-interval = 3s -akka.cluster.sharding.distributed-data = 3s +pekko.cluster.distributed-data.gossip-interval = 3s +pekko.cluster.sharding.distributed-data.gossip-interval = 3s ``` -In Akka 2.6.0 and forward, changing the default in the `akka.cluster.distributed-data` config block will be done before +In Akka 2.6.0 and forward, changing the default in the `pekko.cluster.distributed-data` config block will be done before the variable in `reference.conf` is resolved, so 
that the same change only needs to be done once: ```ruby -akka.cluster.distributed-data.gossip-interval = 3s +pekko.cluster.distributed-data.gossip-interval = 3s ``` The following default settings in Akka are using such substitution and may be affected if you are changing the right hand config path in your `application.conf`: ```ruby -akka.cluster.sharding.coordinator-singleton = ${akka.cluster.singleton} -akka.cluster.sharding.distributed-data = ${akka.cluster.distributed-data} -akka.cluster.singleton-proxy.singleton-name = ${akka.cluster.singleton.singleton-name} -akka.cluster.typed.receptionist.distributed-data = ${akka.cluster.distributed-data} -akka.remote.classic.netty.ssl = ${akka.remote.classic.netty.tcp} -akka.remote.artery.advanced.materializer = ${akka.stream.materializer} +pekko.cluster.sharding.coordinator-singleton = ${pekko.cluster.singleton} +pekko.cluster.sharding.distributed-data = ${pekko.cluster.distributed-data} +pekko.cluster.singleton-proxy.singleton-name = ${pekko.cluster.singleton.singleton-name} +pekko.cluster.typed.receptionist.distributed-data = ${pekko.cluster.distributed-data} +pekko.remote.classic.netty.ssl = ${pekko.remote.classic.netty.tcp} +pekko.remote.artery.advanced.materializer = ${pekko.stream.materializer} ``` @@ -695,32 +695,32 @@ used for individual streams when they are materialized. 
| MaterializerSettings | Corresponding attribute | Config | -------------------------|---------------------------------------------------|---------| -| `initialInputBufferSize` | `Attributes.inputBuffer(initial, max)` | `akka.stream.materializer.initial-input-buffer-size` | -| `maxInputBufferSize` | `Attributes.inputBuffer(initial, max)` | `akka.stream.materializer.max-input-buffer-size` | -| `dispatcher` | `ActorAttributes.dispatcher(name)` | `akka.stream.materializer.dispatcher` | +| `initialInputBufferSize` | `Attributes.inputBuffer(initial, max)` | `pekko.stream.materializer.initial-input-buffer-size` | +| `maxInputBufferSize` | `Attributes.inputBuffer(initial, max)` | `pekko.stream.materializer.max-input-buffer-size` | +| `dispatcher` | `ActorAttributes.dispatcher(name)` | `pekko.stream.materializer.dispatcher` | | `supervisionDecider` | `ActorAttributes.supervisionStrategy` | na | -| `debugLogging` | `ActorAttributes.debugLogging` | `akka.stream.materializer.debug-logging` | -| `outputBurstLimit` | `ActorAttributes.outputBurstLimit` | `akka.stream.materializer.output-burst-limit` | -| `fuzzingMode` | `ActorAttributes.fuzzingMode` | `akka.stream.materializer.debug.fuzzing-mode` | +| `debugLogging` | `ActorAttributes.debugLogging` | `pekko.stream.materializer.debug-logging` | +| `outputBurstLimit` | `ActorAttributes.outputBurstLimit` | `pekko.stream.materializer.output-burst-limit` | +| `fuzzingMode` | `ActorAttributes.fuzzingMode` | `pekko.stream.materializer.debug.fuzzing-mode` | | `autoFusing` | no longer used (since 2.5.0) | na | -| `maxFixedBufferSize` | `ActorAttributes.maxFixedBufferSize` | `akka.stream.materializer.max-fixed-buffer-size` | -| `syncProcessingLimit` | `ActorAttributes.syncProcessingLimit` | `akka.stream.materializer.sync-processing-limit` | -| `IOSettings.tcpWriteBufferSize` | `Tcp.writeBufferSize` | `akka.stream.materializer.io.tcp.write-buffer-size` | -| `blockingIoDispatcher` | na | `akka.stream.materializer.blocking-io-dispatcher` | 
+| `maxFixedBufferSize` | `ActorAttributes.maxFixedBufferSize` | `pekko.stream.materializer.max-fixed-buffer-size` | +| `syncProcessingLimit` | `ActorAttributes.syncProcessingLimit` | `pekko.stream.materializer.sync-processing-limit` | +| `IOSettings.tcpWriteBufferSize` | `Tcp.writeBufferSize` | `pekko.stream.materializer.io.tcp.write-buffer-size` | +| `blockingIoDispatcher` | na | `pekko.stream.materializer.blocking-io-dispatcher` | | StreamRefSettings | Corresponding StreamRefAttributes | Config | -----------------------------------|-----------------------------------|---------| -| `bufferCapacity` | `bufferCapacity` | `akka.stream.materializer.stream-ref.buffer-capacity` | -| `demandRedeliveryInterval` | `demandRedeliveryInterval` | `akka.stream.materializer.stream-ref.demand-redelivery-interval` | -| `subscriptionTimeout` | `subscriptionTimeout` | `akka.stream.materializer.stream-ref.subscription-timeout` | -| `finalTerminationSignalDeadline` | `finalTerminationSignalDeadline` | `akka.stream.materializer.stream-ref.final-termination-signal-deadline` | +| `bufferCapacity` | `bufferCapacity` | `pekko.stream.materializer.stream-ref.buffer-capacity` | +| `demandRedeliveryInterval` | `demandRedeliveryInterval` | `pekko.stream.materializer.stream-ref.demand-redelivery-interval` | +| `subscriptionTimeout` | `subscriptionTimeout` | `pekko.stream.materializer.stream-ref.subscription-timeout` | +| `finalTerminationSignalDeadline` | `finalTerminationSignalDeadline` | `pekko.stream.materializer.stream-ref.final-termination-signal-deadline` | | SubscriptionTimeoutSettings | Corresponding ActorAttributes | Config | -----------------------------------|---------------------------------------------|---------| -| `subscriptionTimeoutSettings.mode` | `streamSubscriptionTimeoutMode` | `akka.stream.materializer.subscription-timeout.mode` | -| `subscriptionTimeoutSettings.timeout` | `streamSubscriptionTimeout` | `akka.stream.materializer.subscription-timeout.timeout` | +| 
`subscriptionTimeoutSettings.mode` | `streamSubscriptionTimeoutMode` | `pekko.stream.materializer.subscription-timeout.mode` | +| `subscriptionTimeoutSettings.timeout` | `streamSubscriptionTimeout` | `pekko.stream.materializer.subscription-timeout.timeout` | Setting attributes on individual streams can be done like so: diff --git a/docs/src/main/paradox/project/rolling-update.md b/docs/src/main/paradox/project/rolling-update.md index 016d090d27..3de2010791 100644 --- a/docs/src/main/paradox/project/rolling-update.md +++ b/docs/src/main/paradox/project/rolling-update.md @@ -104,7 +104,7 @@ You can start using CBOR format already with Akka 2.6.5 without waiting for the a rolling update to Akka 2.6.5 using default configuration. Then change the configuration to: ``` -akka.actor { +pekko.actor { serializers { jackson-cbor = "org.apache.pekko.serialization.jackson.JacksonCborSerializer" } diff --git a/docs/src/main/paradox/remoting-artery.md b/docs/src/main/paradox/remoting-artery.md index a12d442fe9..54c1ace80f 100644 --- a/docs/src/main/paradox/remoting-artery.md +++ b/docs/src/main/paradox/remoting-artery.md @@ -25,7 +25,7 @@ To use Artery Remoting, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-remote_$scala.binary.version$ version=AkkaVersion @@ -49,7 +49,7 @@ To enable remote capabilities in your Akka project you should, at a minimum, add to your `application.conf` file: ``` -akka { +pekko { actor { # provider=remote is possible, but prefer cluster provider = cluster @@ -117,7 +117,7 @@ acts as a "server" to which arbitrary systems on the same network can connect to ## Selecting a transport There are three alternatives of which underlying transport to use. 
It is configured by property -`akka.remote.artery.transport` with the possible values: +`pekko.remote.artery.transport` with the possible values: * `tcp` - Based on @ref:[Akka Streams TCP](stream/stream-io.md#streaming-tcp) (default if other not configured) * `tls-tcp` - Same as `tcp` with encryption using @ref:[Akka Streams TLS](stream/stream-io.md#tls) @@ -277,7 +277,7 @@ In addition to what is described here, read the blog post about [Securing Akka c SSL can be used as the remote transport by using the `tls-tcp` transport: ``` -akka.remote.artery { +pekko.remote.artery { transport = tls-tcp } ``` @@ -285,7 +285,7 @@ akka.remote.artery { Next the actual SSL/TLS parameters have to be configured: ``` -akka.remote.artery { +pekko.remote.artery { transport = tls-tcp ssl.config-ssl-engine { @@ -336,7 +336,7 @@ Note that if TLS is enabled with mutual authentication there is still a risk tha valid certificate by compromising any node with certificates issued by the same internal PKI tree. It's recommended that you enable hostname verification with -`akka.remote.artery.ssl.config-ssl-engine.hostname-verification=on`. +`pekko.remote.artery.ssl.config-ssl-engine.hostname-verification=on`. When enabled it will verify that the destination hostname matches the hostname in the peer's certificate. In deployments where hostnames are dynamic and not known up front it can make sense to leave the hostname verification off. @@ -390,7 +390,7 @@ that system down. 
This is not always desired, and it can be disabled with the following setting: ``` -akka.remote.artery.untrusted-mode = on +pekko.remote.artery.untrusted-mode = on ``` This disallows sending of system messages (actor life-cycle commands, @@ -417,7 +417,7 @@ permission to receive actor selection messages can be granted to specific actors defined in configuration: ``` -akka.remote.artery.trusted-selection-paths = ["/user/receptionist", "/user/namingService"] +pekko.remote.artery.trusted-selection-paths = ["/user/receptionist", "/user/namingService"] ``` @@ -530,7 +530,7 @@ phi = -log10(1 - F(timeSinceLastHeartbeat)) where F is the cumulative distribution function of a normal distribution with mean and standard deviation estimated from historical heartbeat inter-arrival times. -In the @ref:[Remote Configuration](#remote-configuration-artery) you can adjust the `akka.remote.watch-failure-detector.threshold` +In the @ref:[Remote Configuration](#remote-configuration-artery) you can adjust the `pekko.remote.watch-failure-detector.threshold` to define when a *phi* value is considered to be a failure. A low `threshold` is prone to generate many false positives but ensures @@ -555,7 +555,7 @@ a standard deviation of 100 ms. To be able to survive sudden abnormalities, such as garbage collection pauses and transient network failures the failure detector is configured with a margin, -`akka.remote.watch-failure-detector.acceptable-heartbeat-pause`. You may want to +`pekko.remote.watch-failure-detector.acceptable-heartbeat-pause`. You may want to adjust the @ref:[Remote Configuration](#remote-configuration-artery) of this depending on you environment. This is how the curve looks like for `acceptable-heartbeat-pause` configured to 3 seconds. @@ -679,7 +679,7 @@ arrive in send order. 
It is possible to assign actors on given paths to use this path patterns that have to be specified in the actor system's configuration on both the sending and the receiving side: ``` -akka.remote.artery.large-message-destinations = [ +pekko.remote.artery.large-message-destinations = [ "/user/largeMessageActor", "/user/largeMessagesGroup/*", "/user/anotherGroup/*/largeMesssages", @@ -706,7 +706,7 @@ To notice large messages you can enable logging of message types with payload si configured `log-frame-size-exceeding`. ``` -akka.remote.artery { +pekko.remote.artery { log-frame-size-exceeding = 10000b } ``` @@ -780,7 +780,7 @@ aeron.threading.mode=SHARED_NETWORK #aeron.sender.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy #aeron.receiver.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy -# use same director in akka.remote.artery.advanced.aeron-dir config +# use same director in pekko.remote.artery.advanced.aeron-dir config # of the Akka application aeron.dir=/dev/shm/aeron ``` @@ -791,7 +791,7 @@ To use the external media driver from the Akka application you need to define th configuration properties: ``` -akka.remote.artery.advanced.aeron { +pekko.remote.artery.advanced.aeron { embedded-media-driver = off aeron-dir = /dev/shm/aeron } @@ -817,7 +817,7 @@ usage and latency with the following configuration: ``` # Values can be from 1 to 10, where 10 strongly prefers low latency # and 1 strongly prefers less CPU usage -akka.remote.artery.advanced.aeron.idle-cpu-level = 1 +pekko.remote.artery.advanced.aeron.idle-cpu-level = 1 ``` By setting this value to a lower number, it tells Akka to do longer "sleeping" periods on its thread dedicated @@ -851,7 +851,7 @@ host name and port pair that is used to connect to the system from the outside. special configuration that sets both the logical and the bind pairs for remoting. 
``` -akka { +pekko { remote { artery { canonical.hostname = my.domain.com # external (logical) hostname @@ -902,7 +902,7 @@ Any space used in the mount will count towards your container's memory usage. ### Flight Recorder When running on JDK 11 Artery specific flight recording is available through the [Java Flight Recorder (JFR)](https://openjdk.java.net/jeps/328). -The flight recorder is automatically enabled by detecting JDK 11 but can be disabled if needed by setting `akka.java-flight-recorder.enabled = false`. +The flight recorder is automatically enabled by detecting JDK 11 but can be disabled if needed by setting `pekko.java-flight-recorder.enabled = false`. Low overhead Artery specific events are emitted by default when JFR is enabled, higher overhead events needs a custom settings template and are not enabled automatically with the `profiling` JFR template. To enable those create a copy of the `profiling` template and enable all `Akka` sub category events, for example through the JMC GUI. 
diff --git a/docs/src/main/paradox/remoting.md b/docs/src/main/paradox/remoting.md index a956e7bcdf..fd402e9a46 100644 --- a/docs/src/main/paradox/remoting.md +++ b/docs/src/main/paradox/remoting.md @@ -27,7 +27,7 @@ To use Akka Remoting, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-remote_$scala.binary.version$ version=AkkaVersion @@ -50,14 +50,14 @@ To enable classic remoting in your Akka project you should, at a minimum, add th to your `application.conf` file: ``` -akka { +pekko { actor { # provider=remote is possible, but prefer cluster provider = cluster } remote.artery.enabled = false remote.classic { - enabled-transports = ["akka.remote.classic.netty.tcp"] + enabled-transports = ["pekko.remote.classic.netty.tcp"] netty.tcp { hostname = "127.0.0.1" port = 2552 @@ -186,7 +186,7 @@ If you want to use the creation functionality in Akka remoting you have to furth `application.conf` file in the following way (only showing deployment section): ``` -akka { +pekko { actor { deployment { /sampleActor { @@ -226,7 +226,7 @@ companion object of the actor’s class]@java[make a static inner class which implements `Creator`]. Serializability of all Props can be tested by setting the configuration item -`akka.actor.serialize-creators=on`. Only Props whose `deploy` has +`pekko.actor.serialize-creators=on`. Only Props whose `deploy` has `LocalScope` are exempt from this check. @@@ @@ -282,7 +282,7 @@ is *not* remote code loading, the Actors class to be deployed onto a remote syst remote system. This still however may pose a security risk, and one may want to restrict remote deployment to only a specific set of known actors by enabling the allow list feature. 
-To enable remote deployment allow list set the `akka.remote.deployment.enable-allow-list` value to `on`. +To enable remote deployment allow list set the `pekko.remote.deployment.enable-allow-list` value to `on`. The list of allowed classes has to be configured on the "remote" system, in other words on the system onto which others will be attempting to remote deploy Actors. That system, locally, knows best which Actors it should or should not allow others to remote deploy onto it. The full settings section may for example look like this: @@ -302,7 +302,7 @@ is attempted to be sent to the remote system or an inbound connection is accepte When a communication failure happens and the connection is lost between the two systems the link becomes `Gated`. In this state the system will not attempt to connect to the remote host and all outbound messages will be dropped. The time -while the link is in the `Gated` state is controlled by the setting `akka.remote.retry-gate-closed-for`: +while the link is in the `Gated` state is controlled by the setting `pekko.remote.retry-gate-closed-for`: after this time elapses the link state transitions to `Idle` again. `Gate` is one-sided in the sense that whenever a successful *inbound* connection is accepted from a remote system during `Gate` it automatically transitions to `Active` and communication resumes immediately. 
@@ -329,14 +329,14 @@ Remoting uses the `org.apache.pekko.remote.PhiAccrualFailureDetector` failure de implementing the `org.apache.pekko.remote.FailureDetector` and configuring it: ``` -akka.remote.watch-failure-detector.implementation-class = "com.example.CustomFailureDetector" +pekko.remote.watch-failure-detector.implementation-class = "com.example.CustomFailureDetector" ``` In the @ref:[Remote Configuration](#remote-configuration) you may want to adjust these depending on you environment: -* When a *phi* value is considered to be a failure `akka.remote.watch-failure-detector.threshold` -* Margin of error for sudden abnormalities `akka.remote.watch-failure-detector.acceptable-heartbeat-pause` +* When a *phi* value is considered to be a failure `pekko.remote.watch-failure-detector.threshold` +* Margin of error for sudden abnormalities `pekko.remote.watch-failure-detector.acceptable-heartbeat-pause` ## Serialization @@ -397,7 +397,7 @@ finished. @@@ note In order to disable the logging, set -`akka.remote.classic.log-remote-lifecycle-events = off` in your +`pekko.remote.classic.log-remote-lifecycle-events = off` in your `application.conf`. @@@ @@ -439,13 +439,13 @@ That is also security best-practice because of its multiple ### Configuring SSL/TLS for Akka Remoting -SSL can be used as the remote transport by adding `akka.remote.classic.netty.ssl` to the `enabled-transport` configuration section. +SSL can be used as the remote transport by adding `pekko.remote.classic.netty.ssl` to the `enabled-transport` configuration section. An example of setting up the default Netty based SSL driver as default: ``` -akka { +pekko { remote.classic { - enabled-transports = [akka.remote.classic.netty.ssl] + enabled-transports = [pekko.remote.classic.netty.ssl] } } ``` @@ -453,7 +453,7 @@ akka { Next the actual SSL/TLS parameters have to be configured: ``` -akka { +pekko { remote.classic { netty.ssl { hostname = "127.0.0.1" @@ -528,7 +528,7 @@ that system down. 
This is not always desired, and it can be disabled with the following setting: ``` -akka.remote.classic.untrusted-mode = on +pekko.remote.classic.untrusted-mode = on ``` This disallows sending of system messages (actor life-cycle commands, @@ -555,7 +555,7 @@ permission to receive actor selection messages can be granted to specific actors defined in configuration: ``` -akka.remote.classic.trusted-selection-paths = ["/user/receptionist", "/user/namingService"] +pekko.remote.classic.trusted-selection-paths = ["/user/receptionist", "/user/namingService"] ``` The actual message must still not be of type `PossiblyHarmful`. @@ -608,7 +608,7 @@ host name and port pair that is used to connect to the system from the outside. special configuration that sets both the logical and the bind pairs for remoting. ``` -akka.remote.classic.netty.tcp { +pekko.remote.classic.netty.tcp { hostname = my.domain.com # external (logical) hostname port = 8000 # external (logical) port diff --git a/docs/src/main/paradox/routing.md b/docs/src/main/paradox/routing.md index 7f41221036..f65954f12e 100644 --- a/docs/src/main/paradox/routing.md +++ b/docs/src/main/paradox/routing.md @@ -10,7 +10,7 @@ To use Routing, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -823,7 +823,7 @@ Scala Java : @@snip [RouterDocTest.java](/docs/src/test/java/jdocs/routing/RouterDocTest.java) { #resize-pool-1 } -Several more configuration options are available and described in `akka.actor.deployment.default.resizer` +Several more configuration options are available and described in `pekko.actor.deployment.default.resizer` section of the reference @ref:[configuration](general/configuration.md). 
Pool with resizer defined in code: @@ -873,7 +873,7 @@ Scala Java : @@snip [RouterDocTest.java](/docs/src/test/java/jdocs/routing/RouterDocTest.java) { #optimal-size-exploring-resize-pool } -Several more configuration options are available and described in `akka.actor.deployment.default.optimal-size-exploring-resizer` +Several more configuration options are available and described in `pekko.actor.deployment.default.optimal-size-exploring-resizer` section of the reference @ref:[configuration](general/configuration.md). @@@ note diff --git a/docs/src/main/paradox/scheduler.md b/docs/src/main/paradox/scheduler.md index 5f26c9e50b..54196bd177 100644 --- a/docs/src/main/paradox/scheduler.md +++ b/docs/src/main/paradox/scheduler.md @@ -13,7 +13,7 @@ To use Scheduler, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -55,7 +55,7 @@ The default implementation of @apidoc[actor.Scheduler] used by Akka is based on buckets which are emptied according to a fixed schedule. It does not execute tasks at the exact time, but on every tick, it will run everything that is (over)due. The accuracy of the default Scheduler can be modified -by the `akka.scheduler.tick-duration` configuration property. +by the `pekko.scheduler.tick-duration` configuration property. @@@ @@ -156,7 +156,7 @@ which may in worst case cause undesired load on the system. `scheduleWithFixedDe The actual scheduler implementation is loaded reflectively upon @apidoc[actor.ActorSystem] start-up, which means that it is possible to provide a -different one using the `akka.scheduler.implementation` configuration +different one using the `pekko.scheduler.implementation` configuration property. 
The referenced class must implement the @scala[@apidoc[actor.Scheduler]]@java[@apidoc[actor.AbstractScheduler]] interface. diff --git a/docs/src/main/paradox/security/2017-02-10-java-serialization.md b/docs/src/main/paradox/security/2017-02-10-java-serialization.md index 97568dbc75..3f89fbc352 100644 --- a/docs/src/main/paradox/security/2017-02-10-java-serialization.md +++ b/docs/src/main/paradox/security/2017-02-10-java-serialization.md @@ -10,7 +10,7 @@ An attacker that can connect to an `ActorSystem` exposed via Akka Remote over TC capabilities in the context of the JVM process that runs the ActorSystem if: * `JavaSerializer` is enabled (default in Akka 2.4.x) - * and TLS is disabled *or* TLS is enabled with `akka.remote.netty.ssl.security.require-mutual-authentication = false` + * and TLS is disabled *or* TLS is enabled with `pekko.remote.netty.ssl.security.require-mutual-authentication = false` (which is still the default in Akka 2.4.x) * or if TLS is enabled with mutual authentication and the authentication keys of a host that is allowed to connect have been compromised, an attacker gained access to a valid certificate (e.g. 
by compromising a node with certificates issued by the same internal PKI tree to get access of the certificate) * regardless of whether `untrusted` mode is enabled or not diff --git a/docs/src/main/paradox/security/2018-08-29-aes-rng.md b/docs/src/main/paradox/security/2018-08-29-aes-rng.md index 3a27324626..0fc887c083 100644 --- a/docs/src/main/paradox/security/2018-08-29-aes-rng.md +++ b/docs/src/main/paradox/security/2018-08-29-aes-rng.md @@ -28,8 +28,8 @@ configuration of the TLS random number generator should be used: ``` # Set `SecureRandom` RNG explicitly (but it is also the default) -akka.remote.classic.netty.ssl.random-number-generator = "SecureRandom" -akka.remote.artery.ssl.config-ssl-engine.random-number-generator = "SecureRandom" +pekko.remote.classic.netty.ssl.random-number-generator = "SecureRandom" +pekko.remote.artery.ssl.config-ssl-engine.random-number-generator = "SecureRandom" ``` Please subscribe to the [akka-security](https://groups.google.com/forum/#!forum/akka-security) mailing list to be notified promptly about future security issues. @@ -53,10 +53,10 @@ Rationale for the score: * Akka *2.5.0 - 2.5.15* with any of the following configuration properties defined: ``` -akka.remote.netty.ssl.random-number-generator = "AES128CounterSecureRNG" -akka.remote.netty.ssl.random-number-generator = "AES256CounterSecureRNG" -akka.remote.artery.ssl.config-ssl-engine.random-number-generator = "AES128CounterSecureRNG" -akka.remote.artery.ssl.config-ssl-engine.random-number-generator = "AES256CounterSecureRNG" +pekko.remote.netty.ssl.random-number-generator = "AES128CounterSecureRNG" +pekko.remote.netty.ssl.random-number-generator = "AES256CounterSecureRNG" +pekko.remote.artery.ssl.config-ssl-engine.random-number-generator = "AES128CounterSecureRNG" +pekko.remote.artery.ssl.config-ssl-engine.random-number-generator = "AES256CounterSecureRNG" ``` Akka *2.4.x* versions are not affected by this particular bug. 
It has reached diff --git a/docs/src/main/paradox/security/index.md b/docs/src/main/paradox/security/index.md index fb05687f49..cc12f0ca8b 100644 --- a/docs/src/main/paradox/security/index.md +++ b/docs/src/main/paradox/security/index.md @@ -11,7 +11,7 @@ The mailing list is very low traffic, and receives notifications only after secu We strongly encourage people to report such problems to our private security mailing list first, before disclosing them in a public forum. Following best practice, we strongly encourage anyone to report potential security -vulnerabilities to [security@akka.io](mailto:security@akka.io) before disclosing them in a public forum like the mailing list or as a GitHub issue. +vulnerabilities to [security@pekko.io](mailto:security@pekko.io) before disclosing them in a public forum like the mailing list or as a GitHub issue. Reports to this email address will be handled by our security team, who will work together with you to ensure that a fix can be provided without delay. 
diff --git a/docs/src/main/paradox/serialization-classic.md b/docs/src/main/paradox/serialization-classic.md index 14ec1181ee..72b9665211 100644 --- a/docs/src/main/paradox/serialization-classic.md +++ b/docs/src/main/paradox/serialization-classic.md @@ -12,7 +12,7 @@ To use Serialization, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/serialization-jackson.md b/docs/src/main/paradox/serialization-jackson.md index 223190fe58..972275f19d 100644 --- a/docs/src/main/paradox/serialization-jackson.md +++ b/docs/src/main/paradox/serialization-jackson.md @@ -10,7 +10,7 @@ To use Jackson Serialization, you must add the following dependency in your proj @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-serialization-jackson_$scala.binary.version$" version=AkkaVersion @@ -436,7 +436,7 @@ The following Jackson modules are enabled by default: @@snip [reference.conf](/akka-serialization-jackson/src/main/resources/reference.conf) { #jackson-modules } -You can amend the configuration `akka.serialization.jackson.jackson-modules` to enable other modules. +You can amend the configuration `pekko.serialization.jackson.jackson-modules` to enable other modules. The [ParameterNamesModule](https://github.com/FasterXML/jackson-modules-java8/tree/master/parameter-names) requires that the `-parameters` Java compiler option is enabled. 
@@ -478,8 +478,8 @@ The type will be embedded as an object with the fields: ### Configuration per binding By default the configuration for the Jackson serializers and their @javadoc[ObjectMapper](com.fasterxml.jackson.databind.ObjectMapper)s is defined in -the `akka.serialization.jackson` section. It is possible to override that configuration in a more -specific `akka.serialization.jackson.` section. +the `pekko.serialization.jackson` section. It is possible to override that configuration in a more +specific `pekko.serialization.jackson.` section. @@snip [config](/akka-serialization-jackson/src/test/scala/doc/org/apache/pekko/serialization/jackson/SerializationDocSpec.scala) { #specific-config } diff --git a/docs/src/main/paradox/serialization.md b/docs/src/main/paradox/serialization.md index e12bc4ea02..309db607e4 100644 --- a/docs/src/main/paradox/serialization.md +++ b/docs/src/main/paradox/serialization.md @@ -10,7 +10,7 @@ To use Serialization, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor_$scala.binary.version$" version=AkkaVersion @@ -36,13 +36,13 @@ Akka itself uses Protocol Buffers to serialize internal messages (for example cl ### Configuration For Akka to know which `Serializer` to use for what, you need to edit your configuration: -in the `akka.actor.serializers`-section, you bind names to implementations of the @apidoc[serialization.Serializer](Serializer) +in the `pekko.actor.serializers`-section, you bind names to implementations of the @apidoc[serialization.Serializer](Serializer) you wish to use, like this: @@snip [SerializationDocSpec.scala](/docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #serialize-serializers-config } After you've bound names to different 
implementations of `Serializer` you need to wire which classes -should be serialized using which `Serializer`, this is done in the `akka.actor.serialization-bindings`-section: +should be serialized using which `Serializer`, this is done in the `pekko.actor.serialization-bindings`-section: @@snip [SerializationDocSpec.scala](/docs/src/test/scala/docs/serialization/SerializationDocSpec.scala) { #serialization-bindings-config } @@ -239,13 +239,13 @@ However, for early prototyping it is very convenient to use. For that reason and older systems that rely on Java serialization it can be enabled with the following configuration: ```ruby -akka.actor.allow-java-serialization = on +pekko.actor.allow-java-serialization = on ``` Akka will still log warning when Java serialization is used and to silent that you may add: ```ruby -akka.actor.warn-about-java-serializer-usage = off +pekko.actor.warn-about-java-serializer-usage = off ``` ### Java serialization compatibility @@ -263,14 +263,14 @@ The message class (the bindings) is not used for deserialization. The manifest i That means that it is possible to change serialization for a message by performing two rolling update steps to switch to the new serializer. -1. Add the @scala[`Serializer`]@java[`JSerializer`] class and define it in `akka.actor.serializers` config section, but not in - `akka.actor.serialization-bindings`. Perform a rolling update for this change. This means that the +1. Add the @scala[`Serializer`]@java[`JSerializer`] class and define it in `pekko.actor.serializers` config section, but not in + `pekko.actor.serialization-bindings`. Perform a rolling update for this change. This means that the serializer class exists on all nodes and is registered, but it is still not used for serializing any messages. That is important because during the rolling update the old nodes still don't know about the new serializer and would not be able to deserialize messages with that format. 1. 
The second change is to register that the serializer is to be used for certain classes by defining - those in the `akka.actor.serialization-bindings` config section. Perform a rolling update for this + those in the `pekko.actor.serialization-bindings` config section. Perform a rolling update for this change. This means that new nodes will use the new serializer when sending messages and old nodes will be able to deserialize the new format. Old nodes will continue to use the old serializer when sending messages and new nodes will be able to deserialize the old format. @@ -286,7 +286,7 @@ Normally, messages sent between local actors (i.e. same JVM) do not undergo seri Certain messages can be excluded from verification by extending the marker @scala[trait]@java[interface] @apidoc[actor.NoSerializationVerificationNeeded](NoSerializationVerificationNeeded) or define a class name prefix in configuration -`akka.actor.no-serialization-verification-needed-class-prefix`. +`pekko.actor.no-serialization-verification-needed-class-prefix`. If you want to verify that your @apidoc[actor.Props] are serializable you can enable the following config option: diff --git a/docs/src/main/paradox/split-brain-resolver.md b/docs/src/main/paradox/split-brain-resolver.md index 478215c824..fc4dfba653 100644 --- a/docs/src/main/paradox/split-brain-resolver.md +++ b/docs/src/main/paradox/split-brain-resolver.md @@ -18,7 +18,7 @@ dependency included. 
Otherwise, add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster_$scala.binary.version$ version=AkkaVersion @@ -32,7 +32,7 @@ You need to enable the Split Brain Resolver by configuring it as downing provide the `ActorSystem` (`application.conf`): ``` -akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" +pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" ``` You should also consider the different available @ref:[downing strategies](#strategies). @@ -118,7 +118,7 @@ When there is uncertainty it selects to down more nodes than necessary, or even Therefore Split Brain Resolver should always be combined with a mechanism to automatically start up nodes that have been shutdown, and join them to the existing cluster or form a new cluster again. -You enable a strategy with the configuration property `akka.cluster.split-brain-resolver.active-strategy`. +You enable a strategy with the configuration property `pekko.cluster.split-brain-resolver.active-strategy`. ### Stable after @@ -129,7 +129,7 @@ while there are unreachable nodes. Joining nodes are not counted in the logic of @@snip [reference.conf](/akka-cluster/src/main/resources/reference.conf) { #split-brain-resolver } -Set `akka.cluster.split-brain-resolver.stable-after` to a shorter duration to have quicker removal of crashed nodes, +Set `pekko.cluster.split-brain-resolver.stable-after` to a shorter duration to have quicker removal of crashed nodes, at the price of risking too early action on transient network partitions that otherwise would have healed. Do not set this to a shorter duration than the membership dissemination time in the cluster, which depends on the cluster size. 
Recommended minimum duration for different cluster sizes: @@ -161,7 +161,7 @@ That is handled by @ref:[Coordinated Shutdown](coordinated-shutdown.md) but to exit the JVM it's recommended that you enable: ``` -akka.coordinated-shutdown.exit-jvm = on +pekko.coordinated-shutdown.exit-jvm = on ``` @@@ note @@ -207,7 +207,7 @@ it means shutting down more worker nodes. Configuration: ``` -akka.cluster.split-brain-resolver.active-strategy=keep-majority +pekko.cluster.split-brain-resolver.active-strategy=keep-majority ``` @@snip [reference.conf](/akka-cluster/src/main/resources/reference.conf) { #keep-majority } @@ -231,7 +231,7 @@ Therefore it is important that you join new nodes when old nodes have been remov Another consequence of this is that if there are unreachable nodes when starting up the cluster, before reaching this limit, the cluster may shut itself down immediately. This is not an issue -if you start all nodes at approximately the same time or use the `akka.cluster.min-nr-of-members` +if you start all nodes at approximately the same time or use the `pekko.cluster.min-nr-of-members` to define required number of members before the leader changes member status of 'Joining' members to 'Up' You can tune the timeout after which downing decisions are made using the `stable-after` setting. @@ -273,7 +273,7 @@ in the cluster, as described above. Configuration: ``` -akka.cluster.split-brain-resolver.active-strategy=static-quorum +pekko.cluster.split-brain-resolver.active-strategy=static-quorum ``` @@snip [reference.conf](/akka-cluster/src/main/resources/reference.conf) { #static-quorum } @@ -312,7 +312,7 @@ i.e. using the oldest member (singleton) within the nodes with that role. 
Configuration: ``` -akka.cluster.split-brain-resolver.active-strategy=keep-oldest +pekko.cluster.split-brain-resolver.active-strategy=keep-oldest ``` @@snip [reference.conf](/akka-cluster/src/main/resources/reference.conf) { #keep-oldest } @@ -361,13 +361,13 @@ on another side of a network partition, and then all nodes will be downed. Configuration: ``` -akka { +pekko { cluster { downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" split-brain-resolver { active-strategy = "lease-majority" lease-majority { - lease-implementation = "akka.coordination.lease.kubernetes" + lease-implementation = "pekko.coordination.lease.kubernetes" } } } @@ -411,7 +411,7 @@ continue after the `stable-after` or it can be set to `off` to disable this feat ``` -akka.cluster.split-brain-resolver { +pekko.cluster.split-brain-resolver { down-all-when-unstable = 15s stable-after = 20s } @@ -453,9 +453,9 @@ You would like to configure this to a short duration to have quick failover, but risk of having multiple singleton/sharded instances running at the same time and it may take a different amount of time to act on the decision (dissemination of the down/removal). The duration is by default the same as the `stable-after` property (see @ref:[Stable after](#stable-after) above). It is recommended to -leave this value as is, but it can also be separately overriden with the `akka.cluster.down-removal-margin` property. +leave this value as is, but it can also be separately overriden with the `pekko.cluster.down-removal-margin` property. -Another concern for setting this `stable-after`/`akka.cluster.down-removal-margin` is dealing with JVM pauses e.g. +Another concern for setting this `stable-after`/`pekko.cluster.down-removal-margin` is dealing with JVM pauses e.g. garbage collection. When a node is unresponsive it is not known if it is due to a pause, overload, a crash or a network partition. 
If it is pause that lasts longer than `stable-after` * 2 it gives time for SBR to down the node and for singletons and shards to be started on other nodes. When the node un-pauses there will be a short time before diff --git a/docs/src/main/paradox/stream/actor-interop.md b/docs/src/main/paradox/stream/actor-interop.md index 58cad01db9..226689a1da 100644 --- a/docs/src/main/paradox/stream/actor-interop.md +++ b/docs/src/main/paradox/stream/actor-interop.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion @@ -240,4 +240,4 @@ A sink that will publish emitted messages to a @apidoc[actor.typed.pubsub.Topic$ @@@ note See also: @ref[ActorSink.actorRefWithBackpressure operator reference docs](operators/PubSub/sink.md) -@@@ \ No newline at end of file +@@@ diff --git a/docs/src/main/paradox/stream/futures-interop.md b/docs/src/main/paradox/stream/futures-interop.md index 5e80e69a75..7d573674f1 100644 --- a/docs/src/main/paradox/stream/futures-interop.md +++ b/docs/src/main/paradox/stream/futures-interop.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/index.md b/docs/src/main/paradox/stream/index.md index 393acea3c7..4784199a14 100644 --- a/docs/src/main/paradox/stream/index.md +++ b/docs/src/main/paradox/stream/index.md @@ -10,7 +10,7 @@ To use Akka Streams, add the module to your project: 
@@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorFlow/ask.md b/docs/src/main/paradox/stream/operators/ActorFlow/ask.md index 07c35f646b..41e4411122 100644 --- a/docs/src/main/paradox/stream/operators/ActorFlow/ask.md +++ b/docs/src/main/paradox/stream/operators/ActorFlow/ask.md @@ -11,7 +11,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorFlow/askWithContext.md b/docs/src/main/paradox/stream/operators/ActorFlow/askWithContext.md index cc8e8ace05..16096594c4 100644 --- a/docs/src/main/paradox/stream/operators/ActorFlow/askWithContext.md +++ b/docs/src/main/paradox/stream/operators/ActorFlow/askWithContext.md @@ -11,7 +11,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatus.md b/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatus.md index 90fae8e26f..6d230d444d 100644 --- a/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatus.md +++ b/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatus.md @@ -10,7 +10,7 @@ This operator is included in: 
@@dependency[sbt,Maven,Gradle] { symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatusAndContext.md b/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatusAndContext.md index 7f472b8102..1f26abbb45 100644 --- a/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatusAndContext.md +++ b/docs/src/main/paradox/stream/operators/ActorFlow/askWithStatusAndContext.md @@ -11,7 +11,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorSink/actorRef.md b/docs/src/main/paradox/stream/operators/ActorSink/actorRef.md index 617f5c6039..ccf2552770 100644 --- a/docs/src/main/paradox/stream/operators/ActorSink/actorRef.md +++ b/docs/src/main/paradox/stream/operators/ActorSink/actorRef.md @@ -11,7 +11,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorSink/actorRefWithBackpressure.md b/docs/src/main/paradox/stream/operators/ActorSink/actorRefWithBackpressure.md index 447382e565..74dcdf199d 100644 --- a/docs/src/main/paradox/stream/operators/ActorSink/actorRefWithBackpressure.md +++ b/docs/src/main/paradox/stream/operators/ActorSink/actorRefWithBackpressure.md @@ -11,7 +11,7 @@ This operator is included 
in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorSource/actorRef.md b/docs/src/main/paradox/stream/operators/ActorSource/actorRef.md index a5e73e2446..cbf793556e 100644 --- a/docs/src/main/paradox/stream/operators/ActorSource/actorRef.md +++ b/docs/src/main/paradox/stream/operators/ActorSource/actorRef.md @@ -11,7 +11,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/ActorSource/actorRefWithBackpressure.md b/docs/src/main/paradox/stream/operators/ActorSource/actorRefWithBackpressure.md index ff6f23cc9c..cdead1bc46 100644 --- a/docs/src/main/paradox/stream/operators/ActorSource/actorRefWithBackpressure.md +++ b/docs/src/main/paradox/stream/operators/ActorSource/actorRefWithBackpressure.md @@ -11,7 +11,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md b/docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md index b09d08b066..a502612654 100644 --- a/docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md +++ 
b/docs/src/main/paradox/stream/operators/Flow/fromSinkAndSource.md @@ -43,7 +43,7 @@ Java : @@snip [FromSinkAndSource.java](/docs/src/test/java/jdocs/stream/operators/flow/FromSinkAndSource.java) { #chat } -The same patterns can also be applied to @extref:[Akka HTTP WebSockets](akka.http:/server-side/websocket-support.html#server-api) which also have an API accepting a `Flow` of messages. +The same patterns can also be applied to @extref:[Akka HTTP WebSockets](akka.http:/server-side/websocket-support.html#server-api) which also have an API accepting a `Flow` of messages. If we would replace the `fromSinkAndSource` here with `fromSinkAndSourceCoupled` it would allow the client to close the connection by closing its outgoing stream. diff --git a/docs/src/main/paradox/stream/operators/PubSub/sink.md b/docs/src/main/paradox/stream/operators/PubSub/sink.md index 18af6233e9..b770fdd2cc 100644 --- a/docs/src/main/paradox/stream/operators/PubSub/sink.md +++ b/docs/src/main/paradox/stream/operators/PubSub/sink.md @@ -16,7 +16,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion -value1="$akka.version$" +value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion @@ -34,4 +34,4 @@ version=AkkaVersion **backpressures** never -@@@ \ No newline at end of file +@@@ diff --git a/docs/src/main/paradox/stream/operators/PubSub/source.md b/docs/src/main/paradox/stream/operators/PubSub/source.md index d2b54fd856..acd614cd13 100644 --- a/docs/src/main/paradox/stream/operators/PubSub/source.md +++ b/docs/src/main/paradox/stream/operators/PubSub/source.md @@ -19,7 +19,7 @@ This operator is included in: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion -value1="$akka.version$" 
+value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-typed_$scala.binary.version$" version=AkkaVersion @@ -37,4 +37,4 @@ version=AkkaVersion **completes** when the topic actor terminates -@@@ \ No newline at end of file +@@@ diff --git a/docs/src/main/paradox/stream/operators/Source/range.md b/docs/src/main/paradox/stream/operators/Source/range.md index bf75041c28..7fd9a1ee4a 100644 --- a/docs/src/main/paradox/stream/operators/Source/range.md +++ b/docs/src/main/paradox/stream/operators/Source/range.md @@ -9,7 +9,7 @@ Emit each integer in a range, with an option to take bigger steps than 1. @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md b/docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md index ae38fa6930..df252ce8ac 100644 --- a/docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md +++ b/docs/src/main/paradox/stream/operators/StreamConverters/fromInputStream.md @@ -16,7 +16,7 @@ underlying `java.io.InputStream` returns on each read invocation. Such chunks wi than `chunkSize` though. You can configure the default dispatcher for this Source by changing -the `akka.stream.materializer.blocking-io-dispatcher` or set it for a given Source by +the `pekko.stream.materializer.blocking-io-dispatcher` or set it for a given Source by using `org.apache.pekko.stream.ActorAttributes`. 
It materializes a @java[`CompletionStage`]@scala[`Future`] of `IOResult` containing the number of bytes read from the source file diff --git a/docs/src/main/paradox/stream/operators/index.md b/docs/src/main/paradox/stream/operators/index.md index 41a24c31a2..1cabbf98b4 100644 --- a/docs/src/main/paradox/stream/operators/index.md +++ b/docs/src/main/paradox/stream/operators/index.md @@ -89,7 +89,7 @@ These built-in sinks are available from @scala[`org.apache.pekko.stream.scaladsl Sources and sinks for integrating with `java.io.InputStream` and `java.io.OutputStream` can be found on `StreamConverters`. As they are blocking APIs the implementations of these operators are run on a separate -dispatcher configured through the `akka.stream.blocking-io-dispatcher`. +dispatcher configured through the `pekko.stream.blocking-io-dispatcher`. @@@ warning diff --git a/docs/src/main/paradox/stream/reactive-streams-interop.md b/docs/src/main/paradox/stream/reactive-streams-interop.md index 7cef7b8b98..8f40c9a640 100644 --- a/docs/src/main/paradox/stream/reactive-streams-interop.md +++ b/docs/src/main/paradox/stream/reactive-streams-interop.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-composition.md b/docs/src/main/paradox/stream/stream-composition.md index 8e212fe8de..ca7a3726f6 100644 --- a/docs/src/main/paradox/stream/stream-composition.md +++ b/docs/src/main/paradox/stream/stream-composition.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion 
symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-cookbook.md b/docs/src/main/paradox/stream/stream-cookbook.md index 70fb9f549c..48e68c6ed1 100644 --- a/docs/src/main/paradox/stream/stream-cookbook.md +++ b/docs/src/main/paradox/stream/stream-cookbook.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-customize.md b/docs/src/main/paradox/stream/stream-customize.md index 731d2d1fe8..0ba88c9636 100644 --- a/docs/src/main/paradox/stream/stream-customize.md +++ b/docs/src/main/paradox/stream/stream-customize.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-dynamic.md b/docs/src/main/paradox/stream/stream-dynamic.md index adba28fa29..a375f58e84 100644 --- a/docs/src/main/paradox/stream/stream-dynamic.md +++ b/docs/src/main/paradox/stream/stream-dynamic.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" 
artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-error.md b/docs/src/main/paradox/stream/stream-error.md index c62afb94a9..e0ab1d3577 100644 --- a/docs/src/main/paradox/stream/stream-error.md +++ b/docs/src/main/paradox/stream/stream-error.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-flows-and-basics.md b/docs/src/main/paradox/stream/stream-flows-and-basics.md index d22908f066..3fe1418601 100644 --- a/docs/src/main/paradox/stream/stream-flows-and-basics.md +++ b/docs/src/main/paradox/stream/stream-flows-and-basics.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-graphs.md b/docs/src/main/paradox/stream/stream-graphs.md index 18cdfcdfd0..7a854a90e8 100644 --- a/docs/src/main/paradox/stream/stream-graphs.md +++ b/docs/src/main/paradox/stream/stream-graphs.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git 
a/docs/src/main/paradox/stream/stream-io.md b/docs/src/main/paradox/stream/stream-io.md index 598ad4523d..9a0a61ae4d 100644 --- a/docs/src/main/paradox/stream/stream-io.md +++ b/docs/src/main/paradox/stream/stream-io.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion @@ -176,7 +176,7 @@ Java Please note that these operators are backed by Actors and by default are configured to run on a pre-configured threadpool-backed dispatcher dedicated for File IO. This is very important as it isolates the blocking file IO operations from the rest of the ActorSystem allowing each dispatcher to be utilised in the most efficient way. If you want to configure a custom -dispatcher for file IO operations globally, you can do so by changing the `akka.stream.materializer.blocking-io-dispatcher`, +dispatcher for file IO operations globally, you can do so by changing the `pekko.stream.materializer.blocking-io-dispatcher`, or for a specific operator by specifying a custom Dispatcher in code, like this: Scala diff --git a/docs/src/main/paradox/stream/stream-parallelism.md b/docs/src/main/paradox/stream/stream-parallelism.md index f7febb34d7..643f13734b 100644 --- a/docs/src/main/paradox/stream/stream-parallelism.md +++ b/docs/src/main/paradox/stream/stream-parallelism.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git 
a/docs/src/main/paradox/stream/stream-quickstart.md b/docs/src/main/paradox/stream/stream-quickstart.md index 3f8ed32867..4d2b91fe92 100644 --- a/docs/src/main/paradox/stream/stream-quickstart.md +++ b/docs/src/main/paradox/stream/stream-quickstart.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-rate.md b/docs/src/main/paradox/stream/stream-rate.md index d7a0c6e1f0..711667067d 100644 --- a/docs/src/main/paradox/stream/stream-rate.md +++ b/docs/src/main/paradox/stream/stream-rate.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion @@ -75,7 +75,7 @@ and increase them only to a level suitable for the throughput requirements of th can be set through configuration: ``` -akka.stream.materializer.max-input-buffer-size = 16 +pekko.stream.materializer.max-input-buffer-size = 16 ``` Alternatively they can be set per stream by adding an attribute to the complete `RunnableGraph` or on smaller segments diff --git a/docs/src/main/paradox/stream/stream-refs.md b/docs/src/main/paradox/stream/stream-refs.md index d3244b346b..53f39cd179 100644 --- a/docs/src/main/paradox/stream/stream-refs.md +++ b/docs/src/main/paradox/stream/stream-refs.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka 
bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion @@ -198,7 +198,7 @@ timeout has triggered, materialization of the target side will fail, pointing ou Since these timeouts are often very different based on the kind of stream offered, and there can be many different kinds of them in the same application, it is possible to not only configure this setting -globally (`akka.stream.materializer.stream-ref.subscription-timeout`), but also via attributes: +globally (`pekko.stream.materializer.stream-ref.subscription-timeout`), but also via attributes: Scala : @@snip [FlowStreamRefsDocSpec.scala](/docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #attr-sub-timeout } @@ -209,6 +209,6 @@ Java ### General configuration Other settings can be set globally in your `application.conf`, by overriding any of the following values -in the `akka.stream.materializer.stream-ref.*` keyspace: +in the `pekko.stream.materializer.stream-ref.*` keyspace: @@snip [reference.conf](/akka-stream/src/main/resources/reference.conf) { #stream-ref } diff --git a/docs/src/main/paradox/stream/stream-substream.md b/docs/src/main/paradox/stream/stream-substream.md index 9a258c1a76..aebcba0f54 100644 --- a/docs/src/main/paradox/stream/stream-substream.md +++ b/docs/src/main/paradox/stream/stream-substream.md @@ -7,7 +7,7 @@ To use Akka Streams, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/stream/stream-testkit.md b/docs/src/main/paradox/stream/stream-testkit.md index 21a93238be..04c4438bc5 
100644 --- a/docs/src/main/paradox/stream/stream-testkit.md +++ b/docs/src/main/paradox/stream/stream-testkit.md @@ -7,7 +7,7 @@ To use Akka Stream TestKit, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-stream-testkit_$scala.binary.version$" version=AkkaVersion @@ -152,7 +152,7 @@ more aggressively (at the cost of reduced performance) and therefore helps expos enable this setting add the following line to your configuration: ``` -akka.stream.materializer.debug.fuzzing-mode = on +pekko.stream.materializer.debug.fuzzing-mode = on ``` @@@ warning diff --git a/docs/src/main/paradox/supervision-classic.md b/docs/src/main/paradox/supervision-classic.md index 371b5baf5d..5acb9c7426 100644 --- a/docs/src/main/paradox/supervision-classic.md +++ b/docs/src/main/paradox/supervision-classic.md @@ -77,7 +77,7 @@ user-created actors, the guardian named `"/user"`. Actors created using guardian terminates, all normal actors in the system will be shutdown, too. It also means that this guardian’s supervisor strategy determines how the top-level normal actors are supervised. Since Akka 2.1 it is possible to -configure this using the setting `akka.actor.guardian-supervisor-strategy`, +configure this using the setting `pekko.actor.guardian-supervisor-strategy`, which takes the fully-qualified class-name of a `SupervisorStrategyConfigurator`. 
When the guardian escalates a failure, the root guardian’s response will be to terminate the guardian, which in effect diff --git a/docs/src/main/paradox/testing.md b/docs/src/main/paradox/testing.md index 5064db9c05..e14d03be11 100644 --- a/docs/src/main/paradox/testing.md +++ b/docs/src/main/paradox/testing.md @@ -10,7 +10,7 @@ To use Akka Testkit, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-testkit_$scala.binary.version$" version=AkkaVersion @@ -87,7 +87,7 @@ Java In these examples, the maximum durations you will find mentioned below are left out, in which case they use the default value from the configuration item -`akka.test.single-expect-default` which itself defaults to 3 seconds (or they +`pekko.test.single-expect-default` which itself defaults to 3 seconds (or they obey the innermost enclosing `Within` as detailed @ref:[below](#testkit-within)). The full signatures are: * @scala[`expectMsg[T](d: Duration, msg: T): T`]@java[`public  T expectMsgEquals(Duration max, T msg)`] @@ -233,7 +233,7 @@ Java If the number of occurrences is specific—as demonstrated above—then `intercept` will block until that number of matching messages have been received or the -timeout configured in `akka.test.filter-leeway` is used up (time starts +timeout configured in `pekko.test.filter-leeway` is used up (time starts counting after the passed-in block of code returns). In case of a timeout the test fails. 
@@ -244,7 +244,7 @@ Be sure to exchange the default logger with the function: ``` -akka.loggers = [org.apache.pekko.testkit.TestEventListener] +pekko.loggers = [org.apache.pekko.testkit.TestEventListener] ``` @@@ @@ -321,10 +321,10 @@ The tight timeouts you use during testing on your lightning-fast notebook will invariably lead to spurious test failures on the heavily loaded Jenkins server (or similar). To account for this situation, all maximum durations are internally scaled by a factor taken from the @ref:[Configuration](general/configuration-reference.md#config-akka-testkit), -`akka.test.timefactor`, which defaults to 1. +`pekko.test.timefactor`, which defaults to 1. You can scale other durations with the same factor by using the @scala[implicit conversion -in `akka.testkit` package object to add dilated function to `Duration`]@java[`dilated` method in `TestKit`]. +in `pekko.testkit` package object to add dilated function to `Duration`]@java[`dilated` method in `TestKit`]. Scala : @@snip [TestkitDocSpec.scala](/docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #duration-dilation } @@ -715,7 +715,7 @@ options: @@@ div { .group-scala } * *Logging of message invocations on certain actors* This is enabled by a setting in the @ref:[Configuration](general/configuration-reference.md#config-akka-actor) — namely -`akka.actor.debug.receive` — which enables the `loggable` +`pekko.actor.debug.receive` — which enables the `loggable` statement to be applied to an actor’s `receive` function: @@snip [TestkitDocSpec.scala](/docs/src/test/scala/docs/testkit/TestkitDocSpec.scala) { #logging-receive } @@ -733,18 +733,18 @@ would lead to endless loops if it were applied to event bus logger listeners. * *Logging of special messages* Actors handle certain special messages automatically, e.g. `Kill`, `PoisonPill`, etc. 
Tracing of these message invocations is enabled by -the setting `akka.actor.debug.autoreceive`, which enables this on all +the setting `pekko.actor.debug.autoreceive`, which enables this on all actors. * *Logging of the actor lifecycle* Actor creation, start, restart, monitor start, monitor stop and stop may be traced by -enabling the setting `akka.actor.debug.lifecycle`; this, too, is enabled +enabling the setting `pekko.actor.debug.lifecycle`; this, too, is enabled uniformly on all actors. Logging of these messages is at `DEBUG` level. To summarize, you can enable full logging of actor activities using this configuration fragment: ``` -akka { +pekko { loglevel = "DEBUG" actor { debug { diff --git a/docs/src/main/paradox/typed/actor-discovery.md b/docs/src/main/paradox/typed/actor-discovery.md index ad21076b8e..87bfa6cb8b 100644 --- a/docs/src/main/paradox/typed/actor-discovery.md +++ b/docs/src/main/paradox/typed/actor-discovery.md @@ -9,7 +9,7 @@ To use Akka Actor Typed, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/actor-lifecycle.md b/docs/src/main/paradox/typed/actor-lifecycle.md index f008619909..7c91cc5c4d 100644 --- a/docs/src/main/paradox/typed/actor-lifecycle.md +++ b/docs/src/main/paradox/typed/actor-lifecycle.md @@ -12,7 +12,7 @@ To use Akka Actor Typed, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion diff 
--git a/docs/src/main/paradox/typed/actors.md b/docs/src/main/paradox/typed/actors.md index 031e2b2e48..9bb8be8525 100644 --- a/docs/src/main/paradox/typed/actors.md +++ b/docs/src/main/paradox/typed/actors.md @@ -12,7 +12,7 @@ To use Akka Actors, add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion @@ -148,18 +148,18 @@ An application normally consists of a single @apidoc[typed.ActorSystem], running The console output may look like this: ``` -[INFO] [03/13/2018 15:50:05.814] [hello-akka.actor.default-dispatcher-4] [akka://hello/user/greeter] Hello World! -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-4] [akka://hello/user/greeter] Hello Akka! -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-2] [akka://hello/user/World] Greeting 1 for World -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-4] [akka://hello/user/Akka] Greeting 1 for Akka -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello World! -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello Akka! -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-4] [akka://hello/user/World] Greeting 2 for World -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello World! -[INFO] [03/13/2018 15:50:05.815] [hello-akka.actor.default-dispatcher-4] [akka://hello/user/Akka] Greeting 2 for Akka -[INFO] [03/13/2018 15:50:05.816] [hello-akka.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello Akka! 
-[INFO] [03/13/2018 15:50:05.816] [hello-akka.actor.default-dispatcher-4] [akka://hello/user/World] Greeting 3 for World -[INFO] [03/13/2018 15:50:05.816] [hello-akka.actor.default-dispatcher-6] [akka://hello/user/Akka] Greeting 3 for Akka +[INFO] [03/13/2018 15:50:05.814] [hello-pekko.actor.default-dispatcher-4] [akka://hello/user/greeter] Hello World! +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-4] [akka://hello/user/greeter] Hello Akka! +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-2] [akka://hello/user/World] Greeting 1 for World +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-4] [akka://hello/user/Akka] Greeting 1 for Akka +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello World! +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello Akka! +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-4] [akka://hello/user/World] Greeting 2 for World +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello World! +[INFO] [03/13/2018 15:50:05.815] [hello-pekko.actor.default-dispatcher-4] [akka://hello/user/Akka] Greeting 2 for Akka +[INFO] [03/13/2018 15:50:05.816] [hello-pekko.actor.default-dispatcher-5] [akka://hello/user/greeter] Hello Akka! +[INFO] [03/13/2018 15:50:05.816] [hello-pekko.actor.default-dispatcher-4] [akka://hello/user/World] Greeting 3 for World +[INFO] [03/13/2018 15:50:05.816] [hello-pekko.actor.default-dispatcher-6] [akka://hello/user/Akka] Greeting 3 for Akka ``` You will also need to add a @ref:[logging dependency](logging.md) to see that output when running. 
diff --git a/docs/src/main/paradox/typed/cluster-dc.md b/docs/src/main/paradox/typed/cluster-dc.md index 497e5586fa..80bb8a455a 100644 --- a/docs/src/main/paradox/typed/cluster-dc.md +++ b/docs/src/main/paradox/typed/cluster-dc.md @@ -21,7 +21,7 @@ To use Akka Cluster add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion @@ -78,7 +78,7 @@ Cluster can span multiple data centers and still be tolerant to network partitio ## Defining the data centers The features are based on the idea that nodes can be assigned to a group of nodes -by setting the `akka.cluster.multi-data-center.self-data-center` configuration property. +by setting the `pekko.cluster.multi-data-center.self-data-center` configuration property. A node can only belong to one data center and if nothing is specified a node will belong to the `default` data center. @@ -125,8 +125,8 @@ be interpreted as an indication of problem with the network link between the dat Two different failure detectors can be configured for these two purposes: -* `akka.cluster.failure-detector` for failure detection within own data center -* `akka.cluster.multi-data-center.failure-detector` for failure detection across different data centers +* `pekko.cluster.failure-detector` for failure detection within own data center +* `pekko.cluster.multi-data-center.failure-detector` for failure detection across different data centers When @ref[subscribing to cluster events](cluster.md#cluster-subscriptions) the `UnreachableMember` and `ReachableMember` events are for observations within the own data center. The same data center as where the @@ -136,7 +136,7 @@ For cross data center unreachability notifications you can subscribe to `Unreach events. 
Heartbeat messages for failure detection across data centers are only performed between a number of the -oldest nodes on each side. The number of nodes is configured with `akka.cluster.multi-data-center.cross-data-center-connections`. +oldest nodes on each side. The number of nodes is configured with `pekko.cluster.multi-data-center.cross-data-center-connections`. The reason for only using a limited number of nodes is to keep the number of connections across data centers low. The same nodes are also used for the gossip protocol when disseminating the membership information across data centers. Within a data center all nodes are involved in gossip and failure detection. diff --git a/docs/src/main/paradox/typed/cluster-membership.md b/docs/src/main/paradox/typed/cluster-membership.md index 79b1c514ef..2d7eba183c 100644 --- a/docs/src/main/paradox/typed/cluster-membership.md +++ b/docs/src/main/paradox/typed/cluster-membership.md @@ -35,7 +35,7 @@ merged and converge to the same end result. * **joining** - transient state when joining a cluster - * **weakly up** - transient state while network split (only if `akka.cluster.allow-weakly-up-members=on`) + * **weakly up** - transient state while network split (only if `pekko.cluster.allow-weakly-up-members=on`) * **up** - normal operating state @@ -121,7 +121,7 @@ Another transition that is possible without convergence is marking members as `W If a node is `unreachable` then gossip convergence is not possible and therefore most `leader` actions are impossible. By enabling -`akka.cluster.allow-weakly-up-members` (which is enabled by default), joining nodes can be promoted to `WeaklyUp` +`pekko.cluster.allow-weakly-up-members` (which is enabled by default), joining nodes can be promoted to `WeaklyUp` even while convergence is not yet reached. Once gossip convergence can be established again, the leader will move `WeaklyUp` members to `Up`. 
diff --git a/docs/src/main/paradox/typed/cluster-sharded-daemon-process.md b/docs/src/main/paradox/typed/cluster-sharded-daemon-process.md index 949b9976b5..6e4deec0aa 100644 --- a/docs/src/main/paradox/typed/cluster-sharded-daemon-process.md +++ b/docs/src/main/paradox/typed/cluster-sharded-daemon-process.md @@ -7,7 +7,7 @@ To use Akka Sharded Daemon Process, you must add the following dependency in you @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-sharding-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/cluster-sharding.md b/docs/src/main/paradox/typed/cluster-sharding.md index 246d09cc8a..9d39b3aa20 100644 --- a/docs/src/main/paradox/typed/cluster-sharding.md +++ b/docs/src/main/paradox/typed/cluster-sharding.md @@ -12,7 +12,7 @@ To use Akka Cluster Sharding, you must add the following dependency in your proj @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-sharding-typed_$scala.binary.version$ version=AkkaVersion @@ -195,12 +195,12 @@ The new algorithm is recommended and will become the default in future versions You enable the new algorithm by setting `rebalance-absolute-limit` > 0, for example: ``` -akka.cluster.sharding.least-shard-allocation-strategy.rebalance-absolute-limit = 20 +pekko.cluster.sharding.least-shard-allocation-strategy.rebalance-absolute-limit = 20 ``` The `rebalance-absolute-limit` is the maximum number of shards that will be rebalanced in one rebalance round. -You may also want to tune the `akka.cluster.sharding.least-shard-allocation-strategy.rebalance-relative-limit`. 
+You may also want to tune the `pekko.cluster.sharding.least-shard-allocation-strategy.rebalance-relative-limit`. The `rebalance-relative-limit` is a fraction (< 1.0) of total number of (known) shards that will be rebalanced in one rebalance round. The lower result of `rebalance-relative-limit` and `rebalance-absolute-limit` will be used. @@ -304,7 +304,7 @@ testing and feedback. @@@ -Automatic passivation can be disabled by setting `akka.cluster.sharding.passivation.strategy = none`. It is disabled +Automatic passivation can be disabled by setting `pekko.cluster.sharding.passivation.strategy = none`. It is disabled automatically if @ref:[Remembering Entities](#remembering-entities) is enabled. @@@ note @@ -362,7 +362,7 @@ and idle entity timeouts. ### Custom passivation strategies To configure a custom passivation strategy, create a configuration section for the strategy under -`akka.cluster.sharding.passivation` and select this strategy using the `strategy` setting. The strategy needs a +`pekko.cluster.sharding.passivation` and select this strategy using the `strategy` setting. The strategy needs a _replacement policy_ to be chosen, an _active entity limit_ to be set, and can optionally [passivate idle entities](#idle-entity-passivation). For example, a custom strategy can be configured to use the [least recently used policy](#least-recently-used-policy): @@ -544,7 +544,7 @@ There are two options for the state store: To enable distributed data store mode (the default): ``` -akka.cluster.sharding.state-store-mode = ddata +pekko.cluster.sharding.state-store-mode = ddata ``` The state of the `ShardCoordinator` is replicated across the cluster but is not stored to disk. @@ -558,7 +558,7 @@ that contains the node role and therefore the role configuration must be the sam cluster, for example you can't change the roles when performing a rolling update. 
Changing roles requires @ref:[a full cluster restart](../additional/rolling-updates.md#cluster-sharding-configuration-change). -The `akka.cluster.sharding.distributed-data` config section configures the settings for Distributed Data. +The `pekko.cluster.sharding.distributed-data` config section configures the settings for Distributed Data. It's not possible to have different `distributed-data` settings for different sharding entity types. #### Persistence mode @@ -566,7 +566,7 @@ It's not possible to have different `distributed-data` settings for different sh To enable persistence store mode: ``` -akka.cluster.sharding.state-store-mode = persistence +pekko.cluster.sharding.state-store-mode = persistence ``` Since it is running in a cluster @ref:[Persistence](persistence.md) must be configured with a distributed journal. @@ -590,7 +590,7 @@ for example with @ref:[Event Sourcing](persistence.md). To enable remember entities set `rememberEntities` flag to true in @apidoc[typed.ClusterShardingSettings] when starting a shard region (or its proxy) for a given `entity` type or configure -`akka.cluster.sharding.remember-entities = on`. +`pekko.cluster.sharding.remember-entities = on`. Starting and stopping entities has an overhead but this is limited by batching operations to the underlying remember entities store. @@ -618,13 +618,13 @@ There are two options for the remember entities store: Enable ddata mode with (enabled by default): ``` -akka.cluster.sharding.remember-entities-store = ddata +pekko.cluster.sharding.remember-entities-store = ddata ``` To support restarting entities after a full cluster restart (non-rolling) the remember entities store is persisted to disk by distributed data. 
This can be disabled if not needed: ``` -akka.cluster.sharding.distributed-data.durable.keys = [] +pekko.cluster.sharding.distributed-data.durable.keys = [] ``` Reasons for disabling: @@ -639,15 +639,15 @@ For supporting remembered entities in an environment without disk storage use `e Enable `eventsourced` mode with: ``` -akka.cluster.sharding.remember-entities-store = eventsourced +pekko.cluster.sharding.remember-entities-store = eventsourced ``` This mode uses @ref:[Event Sourcing](./persistence.md) to store the active shards and active entities for each shard so a persistence and snapshot plugin must be configured. ``` -akka.cluster.sharding.journal-plugin-id = -akka.cluster.sharding.snapshot-plugin-id = +pekko.cluster.sharding.journal-plugin-id = +pekko.cluster.sharding.snapshot-plugin-id = ``` ### Migrating from deprecated persistence mode @@ -664,7 +664,7 @@ For migrating existing remembered entities an event adapter needs to be configur In this example `cassandra` is the used journal: ``` -akka.persistence.cassandra.journal { +pekko.persistence.cassandra.journal { event-adapters { coordinator-migration = "org.apache.pekko.cluster.sharding.OldCoordinatorStateMigrationEventAdapter" } @@ -679,7 +679,7 @@ Once you have migrated you cannot go back to the old persistence store, a rollin When @ref:[Distributed Data mode](#distributed-data-mode) is used the identifiers of the entities are stored in @ref:[Durable Storage](distributed-data.md#durable-storage) of Distributed Data. You may want to change the -configuration of the `akka.cluster.sharding.distributed-data.durable.lmdb.dir`, since +configuration of the `pekko.cluster.sharding.distributed-data.durable.lmdb.dir`, since the default directory contains the remote port of the actor system. If using a dynamically assigned port (0) it will be different each time and the previously stored data will not be loaded. 
@@ -689,12 +689,12 @@ disk, is that the same entities should be started also after a complete cluster you can disable durable storage and benefit from better performance by using the following configuration: ``` -akka.cluster.sharding.distributed-data.durable.keys = [] +pekko.cluster.sharding.distributed-data.durable.keys = [] ``` ## Startup after minimum number of members -It's recommended to use Cluster Sharding with the Cluster setting `akka.cluster.min-nr-of-members` or -`akka.cluster.role..min-nr-of-members`. `min-nr-of-members` will defer the allocation of the shards +It's recommended to use Cluster Sharding with the Cluster setting `pekko.cluster.min-nr-of-members` or +`pekko.cluster.role..min-nr-of-members`. `min-nr-of-members` will defer the allocation of the shards until at least that number of regions have been started and registered to the coordinator. This avoids that many shards are allocated to the first region that registers and only later are rebalanced to other nodes. @@ -713,7 +713,7 @@ The health check does not fail after an initial successful check. Once a shard r Cluster sharding enables the health check automatically. To disable: ```ruby -akka.management.health-checks.readiness-checks { +pekko.management.health-checks.readiness-checks { sharding = "" } ``` @@ -721,7 +721,7 @@ akka.management.health-checks.readiness-checks { Monitoring of each shard region is off by default. Add them by defining the entity type names (`EntityTypeKey.name`): ```ruby -akka.cluster.sharding.healthcheck.names = ["counter-1", "HelloWorld"] +pekko.cluster.sharding.healthcheck.names = ["counter-1", "HelloWorld"] ``` See also additional information about how to make @ref:[smooth rolling updates](../additional/rolling-updates.md#cluster-sharding). 
@@ -750,7 +750,7 @@ Scala Java : @@snip [ShardingCompileOnlyTest.java](/akka-cluster-sharding-typed/src/test/java/jdocs/org/apache/pekko/cluster/sharding/typed/ShardingCompileOnlyTest.java) { #get-cluster-sharding-stats } -If any shard queries failed, for example due to timeout if a shard was too busy to reply within the configured `akka.cluster.sharding.shard-region-query-timeout`, +If any shard queries failed, for example due to timeout if a shard was too busy to reply within the configured `pekko.cluster.sharding.shard-region-query-timeout`, `ShardRegion.CurrentShardRegionState` and `ShardRegion.ClusterShardingStats` will also include the set of shard identifiers by region that failed. The purpose of these messages is testing and monitoring, they are not provided to give access to @@ -769,7 +769,7 @@ Reasons for how this can happen: A lease can be a final backup that means that each shard won't create child entity actors unless it has the lease. -To use a lease for sharding set `akka.cluster.sharding.use-lease` to the configuration location +To use a lease for sharding set `pekko.cluster.sharding.use-lease` to the configuration location of the lease to use. Each shard will try and acquire a lease with with the name `-shard--` and the owner is set to the `Cluster(system).selfAddress.hostPort`. 
diff --git a/docs/src/main/paradox/typed/cluster-singleton.md b/docs/src/main/paradox/typed/cluster-singleton.md index 5e92d61835..cc167f95b7 100644 --- a/docs/src/main/paradox/typed/cluster-singleton.md +++ b/docs/src/main/paradox/typed/cluster-singleton.md @@ -9,7 +9,7 @@ To use Cluster Singleton, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion @@ -174,7 +174,7 @@ don't run at the same time. Reasons for how this can happen: A lease can be a final backup that means that the singleton actor won't be created unless the lease can be acquired. -To use a lease for singleton set `akka.cluster.singleton.use-lease` to the configuration location +To use a lease for singleton set `pekko.cluster.singleton.use-lease` to the configuration location of the lease to use. A lease with with the name `-singleton-` is used and the owner is set to the @scala[`Cluster(system).selfAddress.hostPort`]@java[`Cluster.get(system).selfAddress().hostPort()`]. 
diff --git a/docs/src/main/paradox/typed/cluster.md b/docs/src/main/paradox/typed/cluster.md index d13b57db71..96fd300c4a 100644 --- a/docs/src/main/paradox/typed/cluster.md +++ b/docs/src/main/paradox/typed/cluster.md @@ -28,7 +28,7 @@ To use Akka Cluster add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion @@ -57,7 +57,7 @@ Java : @@snip [BasicClusterExampleTest.java](/akka-cluster-typed/src/test/java/jdocs/org/apache/pekko/cluster/typed/BasicClusterExampleTest.java) { #cluster-imports } -The minimum configuration required is to set a host/port for remoting and the `akka.actor.provider = "cluster"`. +The minimum configuration required is to set a host/port for remoting and the `pekko.actor.provider = "cluster"`. @@snip [BasicClusterExampleSpec.scala](/akka-cluster-typed/src/test/scala/docs/org/apache/pekko/cluster/typed/BasicClusterExampleSpec.scala) { #config-seeds } @@ -155,7 +155,7 @@ it retries this procedure until success or shutdown. You can define the seed nodes in the @ref:[configuration](#configuration) file (application.conf): ``` -akka.cluster.seed-nodes = [ +pekko.cluster.seed-nodes = [ "akka://ClusterSystem@host1:2552", "akka://ClusterSystem@host2:2552"] ``` @@ -163,8 +163,8 @@ akka.cluster.seed-nodes = [ This can also be defined as Java system properties when starting the JVM using the following syntax: ``` --Dakka.cluster.seed-nodes.0=akka://ClusterSystem@host1:2552 --Dakka.cluster.seed-nodes.1=akka://ClusterSystem@host2:2552 +-Dpekko.cluster.seed-nodes.0=akka://ClusterSystem@host1:2552 +-Dpekko.cluster.seed-nodes.1=akka://ClusterSystem@host2:2552 ``` @@ -228,8 +228,8 @@ the JVM. 
If the `seed-nodes` are assembled dynamically, it is useful to define t and a restart with new seed-nodes should be tried after unsuccessful attempts. ``` -akka.cluster.shutdown-after-unsuccessful-join-seed-nodes = 20s -akka.coordinated-shutdown.exit-jvm = on +pekko.cluster.shutdown-after-unsuccessful-join-seed-nodes = 20s +pekko.coordinated-shutdown.exit-jvm = on ``` If you don't configure seed nodes or use one of the join seed node functions, you need to join the cluster manually @@ -284,7 +284,7 @@ We recommend that you enable the @ref:[Split Brain Resolver](../split-brain-reso Akka Cluster module. You enable it with configuration: ``` -akka.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" +pekko.cluster.downing-provider-class = "org.apache.pekko.cluster.sbr.SplitBrainResolverProvider" ``` You should also consider the different available @ref:[downing strategies](../split-brain-resolver.md#strategies). @@ -313,7 +313,7 @@ Not all nodes of a cluster need to perform the same function. For example, there one which runs the data access layer and one for the number-crunching. Choosing which actors to start on each node, for example cluster-aware routers, can take node roles into account to achieve this distribution of responsibilities. -The node roles are defined in the configuration property named `akka.cluster.roles` +The node roles are defined in the configuration property named `pekko.cluster.roles` and typically defined in the start script as a system property or environment variable. The roles are part of the membership information in @apidoc[MemberEvent](ClusterEvent.MemberEvent) that you can subscribe to. 
The roles @@ -341,14 +341,14 @@ Cluster uses the @apidoc[remote.PhiAccrualFailureDetector](PhiAccrualFailureDete implementing the @apidoc[remote.FailureDetector](FailureDetector) and configuring it: ``` -akka.cluster.implementation-class = "com.example.CustomFailureDetector" +pekko.cluster.implementation-class = "com.example.CustomFailureDetector" ``` In the @ref:[Cluster Configuration](#configuration) you may want to adjust these depending on you environment: -* When a *phi* value is considered to be a failure `akka.cluster.failure-detector.threshold` -* Margin of error for sudden abnormalities `akka.cluster.failure-detector.acceptable-heartbeat-pause` +* When a *phi* value is considered to be a failure `pekko.cluster.failure-detector.threshold` +* Margin of error for sudden abnormalities `pekko.cluster.failure-detector.acceptable-heartbeat-pause` ## How to test @@ -373,14 +373,14 @@ With a configuration option you can define the required number of members before the leader changes member status of 'Joining' members to 'Up'.: ``` -akka.cluster.min-nr-of-members = 3 +pekko.cluster.min-nr-of-members = 3 ``` In a similar way you can define the required number of members of a certain role before the leader changes member status of 'Joining' members to 'Up'.: ``` -akka.cluster.role { +pekko.cluster.role { frontend.min-nr-of-members = 1 backend.min-nr-of-members = 2 } @@ -391,21 +391,21 @@ akka.cluster.role { You can silence the logging of cluster events at info level with configuration property: ``` -akka.cluster.log-info = off +pekko.cluster.log-info = off ``` You can enable verbose logging of cluster events at info level, e.g. for temporary troubleshooting, with configuration property: ``` -akka.cluster.log-info-verbose = on +pekko.cluster.log-info-verbose = on ``` ### Cluster Dispatcher The Cluster extension is implemented with actors. 
To protect them against disturbance from user actors they are by default run on the internal dispatcher configured -under `akka.actor.internal-dispatcher`. The cluster actors can potentially be isolated even -further, onto their own dispatcher using the setting `akka.cluster.use-dispatcher` +under `pekko.actor.internal-dispatcher`. The cluster actors can potentially be isolated even +further, onto their own dispatcher using the setting `pekko.cluster.use-dispatcher` or made run on the same dispatcher to keep the number of threads down. ### Configuration Compatibility Check @@ -417,14 +417,14 @@ The Configuration Compatibility Check feature ensures that all nodes in a cluste New custom checkers can be added by extending @apidoc[cluster.JoinConfigCompatChecker](JoinConfigCompatChecker) and including them in the configuration. Each checker must be associated with a unique key: ``` -akka.cluster.configuration-compatibility-check.checkers { +pekko.cluster.configuration-compatibility-check.checkers { my-custom-config = "com.company.MyCustomJoinConfigCompatChecker" } ``` @@@ note -Configuration Compatibility Check is enabled by default, but can be disabled by setting `akka.cluster.configuration-compatibility-check.enforce-on-join = off`. This is specially useful when performing rolling updates. Obviously this should only be done if a complete cluster shutdown isn't an option. A cluster with nodes with different configuration settings may lead to data loss or data corruption. +Configuration Compatibility Check is enabled by default, but can be disabled by setting `pekko.cluster.configuration-compatibility-check.enforce-on-join = off`. This is specially useful when performing rolling updates. Obviously this should only be done if a complete cluster shutdown isn't an option. A cluster with nodes with different configuration settings may lead to data loss or data corruption. This setting should only be disabled on the joining nodes. 
The checks are always performed on both sides, and warnings are logged. In case of incompatibilities, it is the responsibility of the joining node to decide if the process should be interrupted or not. diff --git a/docs/src/main/paradox/typed/coexisting.md b/docs/src/main/paradox/typed/coexisting.md index 59187238f3..7376312877 100644 --- a/docs/src/main/paradox/typed/coexisting.md +++ b/docs/src/main/paradox/typed/coexisting.md @@ -7,7 +7,7 @@ To use Akka Actor Typed, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion @@ -31,7 +31,7 @@ Typed and classic can interact the following ways: * classic actor system can be converted to a typed actor system @@@ div { .group-scala } -In the examples the `akka.actor` package is aliased to `classic`. +In the examples the `pekko.actor` package is aliased to `classic`. 
Scala : @@snip [ClassicWatchingTypedSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/coexistence/ClassicWatchingTypedSpec.scala) { #import-alias } diff --git a/docs/src/main/paradox/typed/dispatchers.md b/docs/src/main/paradox/typed/dispatchers.md index baae8db387..43a38d5617 100644 --- a/docs/src/main/paradox/typed/dispatchers.md +++ b/docs/src/main/paradox/typed/dispatchers.md @@ -13,7 +13,7 @@ page describes how to use dispatchers with `akka-actor-typed`, which has depende @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor-typed_$scala.binary.version$" version=AkkaVersion @@ -28,15 +28,15 @@ to execute arbitrary code, for instance @scala[`Future`s]@java[`CompletableFutur ## Default dispatcher Every `ActorSystem` will have a default dispatcher that will be used in case nothing else is configured for an `Actor`. -The default dispatcher can be configured, and is by default a `Dispatcher` with the configured `akka.actor.default-dispatcher.executor`. +The default dispatcher can be configured, and is by default a `Dispatcher` with the configured `pekko.actor.default-dispatcher.executor`. If no executor is selected a "fork-join-executor" is selected, which gives excellent performance in most cases. ## Internal dispatcher To protect the internal Actors that are spawned by the various Akka modules, a separate internal dispatcher is used by default. -The internal dispatcher can be tuned in a fine-grained way with the setting `akka.actor.internal-dispatcher`, it can also -be replaced by another dispatcher by making `akka.actor.internal-dispatcher` an @ref[alias](#dispatcher-aliases). 
+The internal dispatcher can be tuned in a fine-grained way with the setting `pekko.actor.internal-dispatcher`, it can also +be replaced by another dispatcher by making `pekko.actor.internal-dispatcher` an @ref[alias](#dispatcher-aliases). ## Looking up a Dispatcher @@ -135,7 +135,7 @@ be used and shared among the two ids. Example: configuring `internal-dispatcher` to be an alias for `default-dispatcher`: ``` -akka.actor.internal-dispatcher = akka.actor.default-dispatcher +pekko.actor.internal-dispatcher = pekko.actor.default-dispatcher ``` @@ -196,7 +196,7 @@ Java Here the app is sending 100 messages to `BlockingActor`s and `PrintActor`s and large numbers -of `akka.actor.default-dispatcher` threads are handling requests. When you run the above code, +of `pekko.actor.default-dispatcher` threads are handling requests. When you run the above code, you will likely to see the entire application gets stuck somewhere like this: ``` @@ -234,7 +234,7 @@ and then you can apply the proposed solutions as explained below. In the above example we put the code under load by sending hundreds of messages to blocking actors which causes threads of the default dispatcher to be blocked. The fork join pool based dispatcher in Akka then attempts to compensate for this blocking by adding more threads to the pool -(`default-akka.actor.default-dispatcher 18,19,20,...`). +(`default-pekko.actor.default-dispatcher 18,19,20,...`). This however is not able to help if those too will immediately get blocked, and eventually the blocking operations will dominate the entire dispatcher. 
diff --git a/docs/src/main/paradox/typed/distributed-data.md b/docs/src/main/paradox/typed/distributed-data.md index 88a230d9c4..1d65bcdae4 100644 --- a/docs/src/main/paradox/typed/distributed-data.md +++ b/docs/src/main/paradox/typed/distributed-data.md @@ -12,7 +12,7 @@ To use Akka Cluster Distributed Data, you must add the following dependency in y @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion @@ -48,7 +48,7 @@ accessed through the @apidoc[typed.*.DistributedData] extension. The messages for the replicator, such as @apidoc[typed.*Replicator.Update] are defined as subclasses of @apidoc[typed.*Replicator.Command] -and the actual CRDTs are defined in the `akka.cluster.ddata` package, for example +and the actual CRDTs are defined in the `pekko.cluster.ddata` package, for example @apidoc[cluster.ddata.GCounter]. It requires a @scala[implicit] `org.apache.pekko.cluster.ddata.SelfUniqueAddress`, available from: @@ -171,7 +171,7 @@ for the counter our actor will receive a @scala[`Replicator.Changed[GCounter]`]@ this is not a message in our protocol, we use a message transformation function to wrap it in the internal `InternalSubscribeResponse` message, which is then handled in the regular message handling of the behavior, as shown in the above example. Subscribers will be notified of changes, if there are any, based on the -configurable `akka.cluster.distributed-data.notify-subscribers-interval`. +configurable `pekko.cluster.distributed-data.notify-subscribers-interval`. The subscriber is automatically unsubscribed if the subscriber is terminated. A subscriber can also be de-registered with the `replicatorAdapter.unsubscribe(key)` function. 
@@ -534,7 +534,7 @@ with Cluster Singleton it's also recommended to enable: ``` # Update and Get operations are sent to oldest nodes first. -akka.cluster.distributed-data.prefer-oldest = on +pekko.cluster.distributed-data.prefer-oldest = on ``` ### Delta-CRDT @@ -560,7 +560,7 @@ of network partitions or similar problems. The the delta propagation can be disabled with configuration property: ``` -akka.cluster.distributed-data.delta-crdt.enabled=off +pekko.cluster.distributed-data.delta-crdt.enabled=off ``` ### Custom Data Type @@ -659,7 +659,7 @@ long as at least one node from the old cluster takes part in a new cluster. The are configured with: ``` -akka.cluster.distributed-data.durable.keys = ["a", "b", "durable*"] +pekko.cluster.distributed-data.durable.keys = ["a", "b", "durable*"] ``` Prefix matching is supported by using `*` at the end of a key. @@ -667,12 +667,12 @@ Prefix matching is supported by using `*` at the end of a key. All entries can be made durable by specifying: ``` -akka.cluster.distributed-data.durable.keys = ["*"] +pekko.cluster.distributed-data.durable.keys = ["*"] ``` @scala[[LMDB](https://symas.com/lmdb/technical/)]@java[[LMDB](https://github.com/lmdbjava/lmdbjava/)] is the default storage implementation. It is possible to replace that with another implementation by implementing the actor protocol described in -`org.apache.pekko.cluster.ddata.DurableStore` and defining the `akka.cluster.distributed-data.durable.store-actor-class` +`org.apache.pekko.cluster.ddata.DurableStore` and defining the `pekko.cluster.distributed-data.durable.store-actor-class` property for the new implementation. The location of the files for the data is configured with: @@ -685,7 +685,7 @@ Scala # and its remote port. # 2. Otherwise the path is used as is, as a relative or absolute path to # a directory. 
-akka.cluster.distributed-data.durable.lmdb.dir = "ddata" +pekko.cluster.distributed-data.durable.lmdb.dir = "ddata" ``` Java @@ -696,7 +696,7 @@ Java # and its remote port. # 2. Otherwise the path is used as is, as a relative or absolute path to # a directory. -akka.cluster.distributed-data.durable.lmdb.dir = "ddata" +pekko.cluster.distributed-data.durable.lmdb.dir = "ddata" ``` @@ -715,7 +715,7 @@ that will be serialized and stored. The risk of losing writes if the JVM crashes data is typically replicated to other nodes immediately according to the given `WriteConsistency`. ``` -akka.cluster.distributed-data.durable.lmdb.write-behind-interval = 200 ms +pekko.cluster.distributed-data.durable.lmdb.write-behind-interval = 200 ms ``` Note that you should be prepared to receive `WriteFailure` as reply to an `Update` of a @@ -726,7 +726,7 @@ There is one important caveat when it comes pruning of @ref:[CRDT Garbage](#crdt If an old data entry that was never pruned is injected and merged with existing data after that the pruning markers have been removed the value will not be correct. The time-to-live of the markers is defined by configuration -`akka.cluster.distributed-data.durable.remove-pruning-marker-after` and is in the magnitude of days. +`pekko.cluster.distributed-data.durable.remove-pruning-marker-after` and is in the magnitude of days. This would be possible if a node with durable data didn't participate in the pruning (e.g. it was shutdown) and later started after this time. 
A node with durable data should not be stopped for longer time than this duration and if it is joining again after this diff --git a/docs/src/main/paradox/typed/distributed-pub-sub.md b/docs/src/main/paradox/typed/distributed-pub-sub.md index c92a29a8cf..98b21f6094 100644 --- a/docs/src/main/paradox/typed/distributed-pub-sub.md +++ b/docs/src/main/paradox/typed/distributed-pub-sub.md @@ -10,7 +10,7 @@ when used in a clustered application: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-cluster-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/typed/durable-state/persistence.md b/docs/src/main/paradox/typed/durable-state/persistence.md index f2db5d2b0c..ba9c3ed1f6 100644 --- a/docs/src/main/paradox/typed/durable-state/persistence.md +++ b/docs/src/main/paradox/typed/durable-state/persistence.md @@ -10,7 +10,7 @@ To use Akka Persistence, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/extending.md b/docs/src/main/paradox/typed/extending.md index 9a4494eafa..39f5f44af2 100644 --- a/docs/src/main/paradox/typed/extending.md +++ b/docs/src/main/paradox/typed/extending.md @@ -60,14 +60,14 @@ The `DatabaseConnectionPool` can be looked up in this way any number of times an ## Loading from configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of the `ExtensionId` -in the `akka.actor.typed.extensions` section of the config you provide to your `ActorSystem`. 
+in the `pekko.actor.typed.extensions` section of the config you provide to your `ActorSystem`. Scala : @@snip [ExtensionDocSpec.scala](/akka-actor-typed-tests/src/test/scala/docs/org/apache/pekko/typed/extensions/ExtensionDocSpec.scala) { #config } Java : ```ruby - akka.actor.typed { + pekko.actor.typed { extensions = ["jdocs.org.apache.pekko.typed.extensions.ExtensionDocTest$DatabaseConnectionPool$Id"] } ``` diff --git a/docs/src/main/paradox/typed/failure-detector.md b/docs/src/main/paradox/typed/failure-detector.md index d26b9d1928..61262ae6dc 100644 --- a/docs/src/main/paradox/typed/failure-detector.md +++ b/docs/src/main/paradox/typed/failure-detector.md @@ -82,7 +82,7 @@ by long (unexpected) garbage collection pauses, overloading the system, too rest and similar. ``` -akka.cluster.failure-detector.acceptable-heartbeat-pause = 7s +pekko.cluster.failure-detector.acceptable-heartbeat-pause = 7s ``` Another log message to watch out for that typically requires investigation of the root cause: diff --git a/docs/src/main/paradox/typed/from-classic.md b/docs/src/main/paradox/typed/from-classic.md index b2017b598f..0f45219892 100644 --- a/docs/src/main/paradox/typed/from-classic.md +++ b/docs/src/main/paradox/typed/from-classic.md @@ -28,7 +28,7 @@ For example `akka-cluster-typed`: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/guide/modules.md b/docs/src/main/paradox/typed/guide/modules.md index 704e8ce0d3..e624cb0650 100644 --- a/docs/src/main/paradox/typed/guide/modules.md +++ b/docs/src/main/paradox/typed/guide/modules.md @@ -38,7 +38,7 @@ This page does not list all available modules, but overviews the main functional @@dependency[sbt,Maven,Gradle] { 
bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion @@ -66,7 +66,7 @@ Challenges that actors solve include the following: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-remote_$scala.binary.version$ version=AkkaVersion @@ -92,7 +92,7 @@ Challenges Remoting solves include the following: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion @@ -118,7 +118,7 @@ Challenges the Cluster module solves include the following: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-sharding-typed_$scala.binary.version$ version=AkkaVersion @@ -140,7 +140,7 @@ Challenges that Sharding solves include the following: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-singleton_$scala.binary.version$ version=AkkaVersion @@ -164,7 +164,7 @@ The Singleton module can be used to solve these challenges: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion 
symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-typed_$scala.binary.version$ version=AkkaVersion @@ -189,7 +189,7 @@ Persistence tackles the following challenges: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-projection-core_$scala.binary.version$ version=AkkaVersion @@ -209,7 +209,7 @@ Challenges Projections solve include the following: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-typed_$scala.binary.version$ version=AkkaVersion @@ -231,7 +231,7 @@ Distributed Data is intended to solve the following challenges: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-stream-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/guide/tutorial_1.md b/docs/src/main/paradox/typed/guide/tutorial_1.md index 15cc334571..b8463cc2b9 100644 --- a/docs/src/main/paradox/typed/guide/tutorial_1.md +++ b/docs/src/main/paradox/typed/guide/tutorial_1.md @@ -7,7 +7,7 @@ Add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor-typed_$scala.binary.version$" version=AkkaVersion @@ -126,7 +126,7 @@ supervised actor started supervised actor fails now 
supervised actor will be restarted supervised actor started -[ERROR] [11/12/2018 12:03:27.171] [ActorHierarchyExperiments-akka.actor.default-dispatcher-2] [akka://ActorHierarchyExperiments/user/supervising-actor/supervised-actor] Supervisor akka.actor.typed.internal.RestartSupervisor@1c452254 saw failure: I failed! +[ERROR] [11/12/2018 12:03:27.171] [ActorHierarchyExperiments-pekko.actor.default-dispatcher-2] [pekko://ActorHierarchyExperiments/user/supervising-actor/supervised-actor] Supervisor org.apache.pekko.actor.typed.internal.RestartSupervisor@1c452254 saw failure: I failed! java.lang.Exception: I failed! at typed.tutorial_1.SupervisedActor.onMessage(ActorHierarchyExperiments.scala:113) at typed.tutorial_1.SupervisedActor.onMessage(ActorHierarchyExperiments.scala:106) diff --git a/docs/src/main/paradox/typed/interaction-patterns.md b/docs/src/main/paradox/typed/interaction-patterns.md index 47efb6657f..cb1c52fac5 100644 --- a/docs/src/main/paradox/typed/interaction-patterns.md +++ b/docs/src/main/paradox/typed/interaction-patterns.md @@ -9,7 +9,7 @@ To use Akka Actor Typed, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/logging.md b/docs/src/main/paradox/typed/logging.md index dd0157a209..153632f881 100644 --- a/docs/src/main/paradox/typed/logging.md +++ b/docs/src/main/paradox/typed/logging.md @@ -13,7 +13,7 @@ via the SLF4J backend, such as Logback configuration. 
@@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor-typed_$scala.binary.version$" version=AkkaVersion @@ -259,7 +259,7 @@ SLF4J or directly to standard out. When `akka-actor-typed` and `akka-slf4j` are on the classpath this event handler actor will emit the events to SLF4J. The @apidoc[event.slf4j.Slf4jLogger](Slf4jLogger) and @apidoc[event.slf4j.Slf4jLoggingFilter](Slf4jLoggingFilter) are enabled automatically -without additional configuration. This can be disabled by `akka.use-slf4j=off` configuration property. +without additional configuration. This can be disabled by `pekko.use-slf4j=off` configuration property. In other words, you don't have to do anything for the Akka internal logging to end up in your configured SLF4J backend. @@ -270,23 +270,23 @@ Ultimately the log level defined in the SLF4J backend is used. For the Akka inte also check the level defined by the SLF4J backend before constructing the final log message and emitting it to the event bus. -However, there is an additional `akka.loglevel` configuration property that defines if logging events +However, there is an additional `pekko.loglevel` configuration property that defines if logging events with lower log level should be discarded immediately without consulting the SLF4J backend. By default this is at `INFO` level, which means that `DEBUG` level logging from the Akka internals will not reach the SLF4J backend even if `DEBUG` is enabled in the backend. -You can enable `DEBUG` level for `akka.loglevel` and control the actual level in the SLF4j backend +You can enable `DEBUG` level for `pekko.loglevel` and control the actual level in the SLF4j backend without any significant overhead, also for production. 
``` -akka.loglevel = "DEBUG" +pekko.loglevel = "DEBUG" ``` To turn off all Akka internal logging (not recommended) you can configure the log levels to be `OFF` like this. ``` -akka { +pekko { stdout-loglevel = "OFF" loglevel = "OFF" } @@ -303,7 +303,7 @@ modules of Akka. When the actor system is starting up and shutting down the configured `loggers` are not used. Instead log messages are printed to stdout (System.out). The default log level for this stdout logger is `WARNING` and it can be silenced completely by setting -`akka.stdout-loglevel=OFF`. +`pekko.stdout-loglevel=OFF`. ### Logging of Dead Letters @@ -316,7 +316,7 @@ messages in the actor mailboxes are sent to dead letters. You can also disable l of dead letters during shutdown. ``` -akka { +pekko { log-dead-letters = 10 log-dead-letters-during-shutdown = on } @@ -332,7 +332,7 @@ Akka has a few configuration options for very low level debugging. These make mo You almost definitely need to have logging set to DEBUG to use any of the options below: ``` -akka { +pekko { loglevel = "DEBUG" } ``` @@ -340,7 +340,7 @@ akka { This config option is very good if you want to know what config settings are loaded by Akka: ``` -akka { +pekko { # Log the complete configuration at INFO level when the actor system is started. # This is useful when you are uncertain of what configuration is used. log-config-on-start = on @@ -350,7 +350,7 @@ akka { If you want unhandled messages logged at DEBUG: ``` -akka { +pekko { actor { debug { # enable DEBUG logging of unhandled messages @@ -363,7 +363,7 @@ akka { If you want to monitor subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream: ``` -akka { +pekko { actor { debug { # enable DEBUG logging of subscription changes on the eventStream @@ -380,7 +380,7 @@ If you want to see all messages that are sent through remoting at DEBUG log leve Note that this logs the messages as they are sent by the transport layer, not by an actor. 
``` -akka.remote.artery { +pekko.remote.artery { # If this is "on", Akka will log all outbound messages at DEBUG level, # if off then they are not logged log-sent-messages = on @@ -391,7 +391,7 @@ If you want to see all messages that are received through remoting at DEBUG log Note that this logs the messages as they are received by the transport layer, not by an actor. ``` -akka.remote.artery { +pekko.remote.artery { # If this is "on", Akka will log all inbound messages at DEBUG level, # if off then they are not logged log-received-messages = on @@ -401,7 +401,7 @@ akka.remote.artery { Logging of message types with payload size in bytes larger than the configured `log-frame-size-exceeding`. ``` -akka.remote.artery { +pekko.remote.artery { log-frame-size-exceeding = 10000b } ``` @@ -451,7 +451,7 @@ troubleshooting. Those logger names are typically prefixed with the package name For example, in Logback the configuration may look like this to enable debug logging for Cluster Sharding: ``` - + @@ -461,18 +461,18 @@ For example, in Logback the configuration may look like this to enable debug log Other examples of logger names or prefixes: ``` -akka.cluster -akka.cluster.Cluster -akka.cluster.ClusterHeartbeat -akka.cluster.ClusterGossip -akka.cluster.ddata -akka.cluster.pubsub -akka.cluster.singleton -akka.cluster.sharding -akka.coordination.lease -akka.discovery -akka.persistence -akka.remote +pekko.cluster +pekko.cluster.Cluster +pekko.cluster.ClusterHeartbeat +pekko.cluster.ClusterGossip +pekko.cluster.ddata +pekko.cluster.pubsub +pekko.cluster.singleton +pekko.cluster.sharding +pekko.coordination.lease +pekko.discovery +pekko.persistence +pekko.remote ``` ## Logging in tests diff --git a/docs/src/main/paradox/typed/mailboxes.md b/docs/src/main/paradox/typed/mailboxes.md index c881c9704e..dec72b721d 100644 --- a/docs/src/main/paradox/typed/mailboxes.md +++ b/docs/src/main/paradox/typed/mailboxes.md @@ -10,7 +10,7 @@ page describes how to use mailboxes with 
`akka-actor-typed`, which has dependenc @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-actor-typed_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/typed/persistence-snapshot.md b/docs/src/main/paradox/typed/persistence-snapshot.md index e88563dd80..0f98c1fced 100644 --- a/docs/src/main/paradox/typed/persistence-snapshot.md +++ b/docs/src/main/paradox/typed/persistence-snapshot.md @@ -51,7 +51,7 @@ A recovery where no saved snapshot matches the specified `SnapshotSelectionCrite events. This can be useful if snapshot serialization format has changed in an incompatible way. It should typically not be used when events have been deleted. -In order to use snapshots, a default snapshot-store (`akka.persistence.snapshot-store.plugin`) must be configured, +In order to use snapshots, a default snapshot-store (`pekko.persistence.snapshot-store.plugin`) must be configured, or you can pick a snapshot store for for a specific `EventSourcedBehavior` by @scala[defining it with `withSnapshotPluginId` of the `EventSourcedBehavior`]@java[overriding `snapshotPluginId` in the `EventSourcedBehavior`]. 
diff --git a/docs/src/main/paradox/typed/persistence-testing.md b/docs/src/main/paradox/typed/persistence-testing.md index 16d6496cd2..685c47e8c0 100644 --- a/docs/src/main/paradox/typed/persistence-testing.md +++ b/docs/src/main/paradox/typed/persistence-testing.md @@ -7,7 +7,7 @@ To use Akka Persistence TestKit, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group1=com.typesafe.akka artifact1=akka-persistence-typed_$scala.binary.version$ version1=AkkaVersion @@ -65,7 +65,7 @@ To use the testkit you need to add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-persistence-testkit_$scala.binary.version$" version=AkkaVersion @@ -202,7 +202,7 @@ the plugins at the same time. 
To coordinate initialization you can use the `Pers @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group="com.typesafe.akka" artifact="akka-persistence-testkit_$scala.binary.version$" version=AkkaVersion diff --git a/docs/src/main/paradox/typed/persistence.md b/docs/src/main/paradox/typed/persistence.md index 88f2269e3b..af4c6a953f 100644 --- a/docs/src/main/paradox/typed/persistence.md +++ b/docs/src/main/paradox/typed/persistence.md @@ -12,7 +12,7 @@ To use Akka Persistence, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-typed_$scala.binary.version$ version=AkkaVersion @@ -479,7 +479,7 @@ to not overload the system and the backend data store. When exceeding the limit until other recoveries have been completed. This is configured by: ``` -akka.persistence.max-concurrent-recoveries = 50 +pekko.persistence.max-concurrent-recoveries = 50 ``` The @ref:[event handler](#event-handler) is used for updating the state when replaying the journaled events. @@ -507,7 +507,7 @@ There could be cases where event streams are corrupted and multiple writers (i.e journaled different messages with the same sequence number. In such a case, you can configure how you filter replayed messages from multiple writers, upon recovery. 
-In your configuration, under the `akka.persistence.journal.xxx.replay-filter` section (where `xxx` is your journal plugin id), +In your configuration, under the `pekko.persistence.journal.xxx.replay-filter` section (where `xxx` is your journal plugin id), you can select the replay filter `mode` from one of the following values: * repair-by-discard-old @@ -520,7 +520,7 @@ For example, if you configure the replay filter for leveldb plugin, it looks lik ``` # The replay filter can detect a corrupt event stream by inspecting # sequence numbers and writerUuid when replaying events. -akka.persistence.journal.leveldb.replay-filter { +pekko.persistence.journal.leveldb.replay-filter { # What the filter should do when detecting invalid events. # Supported values: # `repair-by-discard-old` : discard events from old writers, @@ -643,7 +643,7 @@ You should be careful to not send more messages to a persistent actor than it ca buffer will fill up and when reaching its maximum capacity the commands will be dropped. 
The capacity can be configured with: ``` -akka.persistence.typed.stash-capacity = 10000 +pekko.persistence.typed.stash-capacity = 10000 ``` Note that the stashed commands are kept in an in-memory buffer, so in case of a crash they will not be diff --git a/docs/src/main/paradox/typed/reliable-delivery.md b/docs/src/main/paradox/typed/reliable-delivery.md index a79d2fdf1b..cb30e4532c 100644 --- a/docs/src/main/paradox/typed/reliable-delivery.md +++ b/docs/src/main/paradox/typed/reliable-delivery.md @@ -20,7 +20,7 @@ To use reliable delivery, add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion @@ -257,7 +257,7 @@ To use reliable delivery with Cluster Sharding, add the following module to your @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-cluster-sharding-typed_$scala.binary.version$ version=AkkaVersion @@ -366,7 +366,7 @@ When using the `EventSourcedProducerQueue` the following dependency is needed: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-persistence-typed_$scala.binary.version$ version=AkkaVersion @@ -426,7 +426,7 @@ again on the consumer side. Serialization and deserialization is performed by the `ProducerController` and `ConsumerController` respectively instead of in the remote transport layer. 
-This is enabled by configuration `akka.reliable-delivery.producer-controller.chunk-large-messages` and defines +This is enabled by configuration `pekko.reliable-delivery.producer-controller.chunk-large-messages` and defines the maximum size in bytes of the chunked pieces. Messages smaller than the configured size are not chunked, but serialization still takes place in the `ProducerController` and `ConsumerController`. @@ -438,7 +438,7 @@ This feature is not implemented for @ref:[Work pulling](#work-pulling) and @ref: ## Configuration -There are several configuration properties, please refer to `akka.reliable-delivery` config section in the +There are several configuration properties, please refer to `pekko.reliable-delivery` config section in the reference configuration: * @ref:[akka-actor-typed reference configuration](../general/configuration-reference.md#config-akka-actor-typed) diff --git a/docs/src/main/paradox/typed/routers.md b/docs/src/main/paradox/typed/routers.md index d5fd930ed1..7784d74708 100644 --- a/docs/src/main/paradox/typed/routers.md +++ b/docs/src/main/paradox/typed/routers.md @@ -9,7 +9,7 @@ To use Akka Actor Typed, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/stash.md b/docs/src/main/paradox/typed/stash.md index c9509f5f11..61db63ba33 100644 --- a/docs/src/main/paradox/typed/stash.md +++ b/docs/src/main/paradox/typed/stash.md @@ -9,7 +9,7 @@ To use Akka Actor Typed, you must add the following dependency in your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + 
value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/main/paradox/typed/testing.md b/docs/src/main/paradox/typed/testing.md index fe310e0d64..5ebe6df5a9 100644 --- a/docs/src/main/paradox/typed/testing.md +++ b/docs/src/main/paradox/typed/testing.md @@ -9,7 +9,7 @@ To use Actor TestKit add the module to your project: @@dependency[sbt,Maven,Gradle] { bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion symbol1=AkkaVersion - value1="$akka.version$" + value1="$pekko.version$" group=com.typesafe.akka artifact=akka-actor-testkit-typed_$scala.binary.version$ version=AkkaVersion diff --git a/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/AsSubscriber.java b/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/AsSubscriber.java index 588838b6de..26fa23d04c 100644 --- a/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/AsSubscriber.java +++ b/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/AsSubscriber.java @@ -23,10 +23,10 @@ public interface AsSubscriber { public static final class Source { public // #api - static akka.stream.javadsl.Source> asSubscriber() + static org.apache.pekko.stream.javadsl.Source> asSubscriber() // #api { - return akka.stream.javadsl.JavaFlowSupport.Source.asSubscriber(); + return org.apache.pekko.stream.javadsl.JavaFlowSupport.Source.asSubscriber(); } } } diff --git a/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/FromPublisher.java b/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/FromPublisher.java index eb257a9527..3a251a9498 100644 --- a/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/FromPublisher.java +++ b/docs/src/test/java-jdk9-only/jdocs/stream/operators/source/FromPublisher.java @@ -22,10 +22,10 @@ public interface FromPublisher { public static final class Source { public // #api - static akka.stream.javadsl.Source fromPublisher(Publisher publisher) + 
static pekko.stream.javadsl.Source fromPublisher(Publisher publisher) // #api { - return akka.stream.javadsl.JavaFlowSupport.Source.fromPublisher(publisher); + return pekko.stream.javadsl.JavaFlowSupport.Source.fromPublisher(publisher); } } } diff --git a/docs/src/test/java/jdocs/actor/ActorDocTest.java b/docs/src/test/java/jdocs/actor/ActorDocTest.java index f45cffa8ee..96e0fd9e8e 100644 --- a/docs/src/test/java/jdocs/actor/ActorDocTest.java +++ b/docs/src/test/java/jdocs/actor/ActorDocTest.java @@ -55,7 +55,7 @@ public class ActorDocTest extends AbstractJavaTest { public static Config config = ConfigFactory.parseString( - "akka {\n" + "pekko {\n" + " loggers = [\"org.apache.pekko.testkit.TestEventListener\"]\n" + " loglevel = \"WARNING\"\n" + " stdout-loglevel = \"WARNING\"\n" diff --git a/docs/src/test/java/jdocs/actor/FaultHandlingDocSample.java b/docs/src/test/java/jdocs/actor/FaultHandlingDocSample.java index 7e2a0b829d..229ed010c8 100644 --- a/docs/src/test/java/jdocs/actor/FaultHandlingDocSample.java +++ b/docs/src/test/java/jdocs/actor/FaultHandlingDocSample.java @@ -41,8 +41,8 @@ public class FaultHandlingDocSample { public static void main(String[] args) { Config config = ConfigFactory.parseString( - "akka.loglevel = \"DEBUG\"\n" - + "akka.actor.debug {\n" + "pekko.loglevel = \"DEBUG\"\n" + + "pekko.actor.debug {\n" + " receive = on\n" + " lifecycle = on\n" + "}\n"); diff --git a/docs/src/test/java/jdocs/actor/FaultHandlingTest.java b/docs/src/test/java/jdocs/actor/FaultHandlingTest.java index 799c53d238..fd3bcdfbd4 100644 --- a/docs/src/test/java/jdocs/actor/FaultHandlingTest.java +++ b/docs/src/test/java/jdocs/actor/FaultHandlingTest.java @@ -44,7 +44,7 @@ public class FaultHandlingTest extends AbstractJavaTest { public static Config config = ConfigFactory.parseString( - "akka {\n" + "pekko {\n" + " loggers = [\"org.apache.pekko.testkit.TestEventListener\"]\n" + " loglevel = \"WARNING\"\n" + " stdout-loglevel = \"WARNING\"\n" diff --git 
a/docs/src/test/java/jdocs/cluster/FactorialFrontendMain.java b/docs/src/test/java/jdocs/cluster/FactorialFrontendMain.java index 3db0b30619..3448dd83fc 100644 --- a/docs/src/test/java/jdocs/cluster/FactorialFrontendMain.java +++ b/docs/src/test/java/jdocs/cluster/FactorialFrontendMain.java @@ -18,7 +18,7 @@ public class FactorialFrontendMain { final int upToN = 200; final Config config = - ConfigFactory.parseString("akka.cluster.roles = [frontend]") + ConfigFactory.parseString("pekko.cluster.roles = [frontend]") .withFallback(ConfigFactory.load("factorial")); final ActorSystem system = ActorSystem.create("ClusterSystem", config); diff --git a/docs/src/test/java/jdocs/cluster/StatsSampleOneMasterMain.java b/docs/src/test/java/jdocs/cluster/StatsSampleOneMasterMain.java index 2ab42220dd..465305f2ac 100644 --- a/docs/src/test/java/jdocs/cluster/StatsSampleOneMasterMain.java +++ b/docs/src/test/java/jdocs/cluster/StatsSampleOneMasterMain.java @@ -30,8 +30,8 @@ public class StatsSampleOneMasterMain { for (String port : ports) { // Override the configuration of the port Config config = - ConfigFactory.parseString("akka.remote.classic.netty.tcp.port=" + port) - .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]")) + ConfigFactory.parseString("pekko.remote.classic.netty.tcp.port=" + port) + .withFallback(ConfigFactory.parseString("pekko.cluster.roles = [compute]")) .withFallback(ConfigFactory.load("stats2")); ActorSystem system = ActorSystem.create("ClusterSystem", config); diff --git a/docs/src/test/java/jdocs/config/ConfigDocTest.java b/docs/src/test/java/jdocs/config/ConfigDocTest.java index a8a93bd796..33a1426d37 100644 --- a/docs/src/test/java/jdocs/config/ConfigDocTest.java +++ b/docs/src/test/java/jdocs/config/ConfigDocTest.java @@ -20,7 +20,7 @@ public class ConfigDocTest { public void customConfig() { // #custom-config - Config customConf = ConfigFactory.parseString("akka.log-config-on-start = on"); + Config customConf = 
ConfigFactory.parseString("pekko.log-config-on-start = on"); // ConfigFactory.load sandwiches customConfig between default reference // config and default overrides, and then resolves it. ActorSystem system = diff --git a/docs/src/test/java/jdocs/discovery/DnsDiscoveryDocTest.java b/docs/src/test/java/jdocs/discovery/DnsDiscoveryDocTest.java index df356346b9..243b648f86 100644 --- a/docs/src/test/java/jdocs/discovery/DnsDiscoveryDocTest.java +++ b/docs/src/test/java/jdocs/discovery/DnsDiscoveryDocTest.java @@ -49,7 +49,7 @@ public class DnsDiscoveryDocTest extends JUnitSuite { result.toCompletableFuture().get(5, TimeUnit.SECONDS); } catch (Exception e) { - system.log().warning("Failed lookup akka.io, but ignoring: " + e); + system.log().warning("Failed lookup pekko.io, but ignoring: " + e); // don't fail this test } } diff --git a/docs/src/test/java/jdocs/io/japi/EchoServer.java b/docs/src/test/java/jdocs/io/japi/EchoServer.java index 44aaf0cb36..f721275779 100644 --- a/docs/src/test/java/jdocs/io/japi/EchoServer.java +++ b/docs/src/test/java/jdocs/io/japi/EchoServer.java @@ -17,7 +17,7 @@ import com.typesafe.config.ConfigFactory; public class EchoServer { public static void main(String[] args) throws InterruptedException { - final Config config = ConfigFactory.parseString("akka.loglevel=DEBUG"); + final Config config = ConfigFactory.parseString("pekko.loglevel=DEBUG"); final ActorSystem system = ActorSystem.create("EchoServer", config); try { final CountDownLatch latch = new CountDownLatch(1); diff --git a/docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java b/docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java index ce3ca99d63..f6ee6e2413 100644 --- a/docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java +++ b/docs/src/test/java/jdocs/persistence/LambdaPersistencePluginDocTest.java @@ -139,8 +139,8 @@ public class LambdaPersistencePluginDocTest { public MyJournalSpecTest() { super( 
ConfigFactory.parseString( - "akka.persistence.journal.plugin = " - + "\"akka.persistence.journal.leveldb-shared\"")); + "pekko.persistence.journal.plugin = " + + "\"pekko.persistence.journal.leveldb-shared\"")); } @Override @@ -160,8 +160,8 @@ public class LambdaPersistencePluginDocTest { public MySnapshotStoreTest() { super( ConfigFactory.parseString( - "akka.persistence.snapshot-store.plugin = " - + "\"akka.persistence.snapshot-store.local\"")); + "pekko.persistence.snapshot-store.plugin = " + + "\"pekko.persistence.snapshot-store.local\"")); } } // #snapshot-store-tck-java @@ -180,13 +180,13 @@ public class LambdaPersistencePluginDocTest { super( ConfigFactory.parseString( "persistence.journal.plugin = " - + "\"akka.persistence.journal.leveldb-shared\"")); + + "\"pekko.persistence.journal.leveldb-shared\"")); Config config = system().settings().config(); storageLocations.add( - new File(config.getString("akka.persistence.journal.leveldb.dir"))); + new File(config.getString("pekko.persistence.journal.leveldb.dir"))); storageLocations.add( - new File(config.getString("akka.persistence.snapshot-store.local.dir"))); + new File(config.getString("pekko.persistence.snapshot-store.local.dir"))); } @Override diff --git a/docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java b/docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java index 74104593f9..ca543dd4c2 100644 --- a/docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java +++ b/docs/src/test/java/jdocs/persistence/PersistenceMultiDocTest.java @@ -30,13 +30,13 @@ public class PersistenceMultiDocTest { // Absolute path to the journal plugin configuration entry in the `reference.conf` @Override public String journalPluginId() { - return "akka.persistence.chronicle.journal"; + return "pekko.persistence.chronicle.journal"; } // Absolute path to the snapshot store plugin configuration entry in the `reference.conf` @Override public String snapshotPluginId() { - return 
"akka.persistence.chronicle.snapshot-store"; + return "pekko.persistence.chronicle.snapshot-store"; } } // #override-plugins diff --git a/docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java b/docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java index 7d82beb020..15bb46fe07 100644 --- a/docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java +++ b/docs/src/test/java/jdocs/persistence/PersistenceQueryDocTest.java @@ -100,8 +100,8 @@ public class PersistenceQueryDocTest { * You can use `NoOffset` to retrieve all events with a given tag or retrieve a subset of all * events by specifying a `Sequence` `offset`. The `offset` corresponds to an ordered sequence * number for the specific tag. Note that the corresponding offset of each event is provided in - * the [[akka.persistence.query.EventEnvelope]], which makes it possible to resume the stream at - * a later point from a given offset. + * the [[pekko.persistence.query.EventEnvelope]], which makes it possible to resume the stream + * at a later point from a given offset. * *

The `offset` is exclusive, i.e. the event with the exact same sequence number will not be * included in the returned stream. This means that you can use the offset that is returned in @@ -207,7 +207,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // issue query to journal Source source = @@ -222,7 +222,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #all-persistence-ids-live readJournal.persistenceIds(); @@ -235,7 +235,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #all-persistence-ids-snap readJournal.currentPersistenceIds(); @@ -248,7 +248,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #events-by-persistent-id readJournal.eventsByPersistenceId("user-us-1337", 0L, Long.MAX_VALUE); @@ -261,7 +261,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #events-by-tag // assuming journal is able to work with numeric 
offsets we can: @@ -294,7 +294,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #advanced-journal-query-usage @@ -339,7 +339,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #projection-into-different-store-rs final ReactiveStreamsCompatibleDBDriver driver = new ReactiveStreamsCompatibleDBDriver(); @@ -371,7 +371,7 @@ public class PersistenceQueryDocTest { final MyJavadslReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor( - MyJavadslReadJournal.class, "akka.persistence.query.my-read-journal"); + MyJavadslReadJournal.class, "pekko.persistence.query.my-read-journal"); // #projection-into-different-store-simple final ExampleStore store = new ExampleStore(); diff --git a/docs/src/test/java/jdocs/persistence/testkit/PersistenceInitTest.java b/docs/src/test/java/jdocs/persistence/testkit/PersistenceInitTest.java index 2a573b29da..395a592625 100644 --- a/docs/src/test/java/jdocs/persistence/testkit/PersistenceInitTest.java +++ b/docs/src/test/java/jdocs/persistence/testkit/PersistenceInitTest.java @@ -28,10 +28,10 @@ public class PersistenceInitTest extends AbstractJavaTest { public static final TestKitJunitResource testKit = new TestKitJunitResource( ConfigFactory.parseString( - "akka.persistence.journal.plugin = \"akka.persistence.journal.inmem\" \n" - + "akka.persistence.journal.inmem.test-serialization = on \n" - + "akka.persistence.snapshot-store.plugin = \"akka.persistence.snapshot-store.local\" \n" - + "akka.persistence.snapshot-store.local.dir = \"target/snapshot-" + 
"pekko.persistence.journal.plugin = \"pekko.persistence.journal.inmem\" \n" + + "pekko.persistence.journal.inmem.test-serialization = on \n" + + "pekko.persistence.snapshot-store.plugin = \"pekko.persistence.snapshot-store.local\" \n" + + "pekko.persistence.snapshot-store.local.dir = \"target/snapshot-" + UUID.randomUUID().toString() + "\" \n") .withFallback(ConfigFactory.defaultApplication())); diff --git a/docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java b/docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java index 62708beb51..af45808088 100644 --- a/docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java +++ b/docs/src/test/java/jdocs/remoting/RemoteDeploymentDocTest.java @@ -40,10 +40,10 @@ public class RemoteDeploymentDocTest extends AbstractJavaTest { new AkkaJUnitActorSystemResource( "RemoteDeploymentDocTest", ConfigFactory.parseString( - " akka.actor.provider = remote\n" - + " akka.remote.classic.netty.tcp.port = 0\n" - + " akka.remote.artery.canonical.port = 0\n" - + " akka.remote.use-unsafe-remote-features-outside-cluster = on") + " pekko.actor.provider = remote\n" + + " pekko.remote.classic.netty.tcp.port = 0\n" + + " pekko.remote.artery.canonical.port = 0\n" + + " pekko.remote.use-unsafe-remote-features-outside-cluster = on") .withFallback(AkkaSpec.testConf())); private final ActorSystem system = actorSystemResource.getSystem(); @@ -81,12 +81,12 @@ public class RemoteDeploymentDocTest extends AbstractJavaTest { @Test public void demonstrateProgrammaticConfig() { // #programmatic - ConfigFactory.parseString("akka.remote.classic.netty.tcp.hostname=\"1.2.3.4\"") + ConfigFactory.parseString("pekko.remote.classic.netty.tcp.hostname=\"1.2.3.4\"") .withFallback(ConfigFactory.load()); // #programmatic // #programmatic-artery - ConfigFactory.parseString("akka.remote.artery.canonical.hostname=\"1.2.3.4\"") + ConfigFactory.parseString("pekko.remote.artery.canonical.hostname=\"1.2.3.4\"") .withFallback(ConfigFactory.load()); // 
#programmatic-artery } diff --git a/docs/src/test/java/jdocs/stream/FlowDocTest.java b/docs/src/test/java/jdocs/stream/FlowDocTest.java index 94e8f7cc62..c144169a1c 100644 --- a/docs/src/test/java/jdocs/stream/FlowDocTest.java +++ b/docs/src/test/java/jdocs/stream/FlowDocTest.java @@ -125,7 +125,7 @@ public class FlowDocTest extends AbstractJavaTest { final Object tick = new Object(); final Duration oneSecond = Duration.ofSeconds(1); - // akka.actor.Cancellable + // pekko.actor.Cancellable final Source timer = Source.tick(oneSecond, oneSecond, tick); Sink.ignore().runWith(timer, system); diff --git a/docs/src/test/java/jdocs/stream/IntegrationDocTest.java b/docs/src/test/java/jdocs/stream/IntegrationDocTest.java index c0108854cf..50bd4e1ee0 100644 --- a/docs/src/test/java/jdocs/stream/IntegrationDocTest.java +++ b/docs/src/test/java/jdocs/stream/IntegrationDocTest.java @@ -58,7 +58,7 @@ public class IntegrationDocTest extends AbstractJavaTest { + " core-pool-size-max = 10 \n" + " } \n" + "} \n" - + "akka.actor.default-mailbox.mailbox-type = akka.dispatch.UnboundedMailbox\n"); + + "pekko.actor.default-mailbox.mailbox-type = pekko.dispatch.UnboundedMailbox\n"); system = ActorSystem.create("IntegrationDocTest", config); ref = system.actorOf(Props.create(Translator.class)); diff --git a/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java b/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java index 41bfb6b635..cd2489a86a 100644 --- a/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java +++ b/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeLoggingElements.java @@ -31,7 +31,7 @@ public class RecipeLoggingElements extends RecipeTest { ActorSystem.create( "RecipeLoggingElements", ConfigFactory.parseString( - "akka.loglevel=DEBUG\nakka.loggers = [org.apache.pekko.testkit.TestEventListener]")); + "pekko.loglevel=DEBUG\npekko.loggers = [org.apache.pekko.testkit.TestEventListener]")); } @AfterClass 
diff --git a/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSourceFromFunction.java b/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSourceFromFunction.java index 4a75db6cc3..022dcba1fb 100644 --- a/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSourceFromFunction.java +++ b/docs/src/test/java/jdocs/stream/javadsl/cookbook/RecipeSourceFromFunction.java @@ -28,7 +28,7 @@ public class RecipeSourceFromFunction extends RecipeTest { ActorSystem.create( "RecipeSourceFromFunction", ConfigFactory.parseString( - "akka.loglevel=DEBUG\nakka.loggers = [org.apache.pekko.testkit.TestEventListener]")); + "pekko.loglevel=DEBUG\npekko.loggers = [org.apache.pekko.testkit.TestEventListener]")); } @AfterClass diff --git a/docs/src/test/java/jdocs/stream/operators/source/Restart.java b/docs/src/test/java/jdocs/stream/operators/source/Restart.java index d3b89b3b90..144f7bc20f 100644 --- a/docs/src/test/java/jdocs/stream/operators/source/Restart.java +++ b/docs/src/test/java/jdocs/stream/operators/source/Restart.java @@ -40,33 +40,33 @@ public class Restart { forever.runWith( Sink.foreach((Creator nr) -> system.log().info("{}", nr.create())), system); // logs - // [INFO] [12/10/2019 13:51:58.300] [default-akka.test.stream-dispatcher-7] - // [akka.actor.ActorSystemImpl(default)] 1 - // [INFO] [12/10/2019 13:51:58.301] [default-akka.test.stream-dispatcher-7] - // [akka.actor.ActorSystemImpl(default)] 2 - // [INFO] [12/10/2019 13:51:58.302] [default-akka.test.stream-dispatcher-7] - // [akka.actor.ActorSystemImpl(default)] 3 - // [WARN] [12/10/2019 13:51:58.310] [default-akka.test.stream-dispatcher-7] + // [INFO] [12/10/2019 13:51:58.300] [default-pekko.test.stream-dispatcher-7] + // [pekko.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:58.301] [default-pekko.test.stream-dispatcher-7] + // [pekko.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:58.302] [default-pekko.test.stream-dispatcher-7] + // [pekko.actor.ActorSystemImpl(default)] 3
+ // [WARN] [12/10/2019 13:51:58.310] [default-pekko.test.stream-dispatcher-7] // [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: // (RuntimeException: darn) // --> 1 second gap - // [INFO] [12/10/2019 13:51:59.379] [default-akka.test.stream-dispatcher-8] - // [akka.actor.ActorSystemImpl(default)] 1 - // [INFO] [12/10/2019 13:51:59.382] [default-akka.test.stream-dispatcher-8] - // [akka.actor.ActorSystemImpl(default)] 2 - // [INFO] [12/10/2019 13:51:59.383] [default-akka.test.stream-dispatcher-8] - // [akka.actor.ActorSystemImpl(default)] 3 - // [WARN] [12/10/2019 13:51:59.386] [default-akka.test.stream-dispatcher-8] + // [INFO] [12/10/2019 13:51:59.379] [default-pekko.test.stream-dispatcher-8] + // [pekko.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:59.382] [default-pekko.test.stream-dispatcher-8] + // [pekko.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:59.383] [default-pekko.test.stream-dispatcher-8] + // [pekko.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:59.386] [default-pekko.test.stream-dispatcher-8] // [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: // (RuntimeException: darn) // --> 2 second gap - // [INFO] [12/10/2019 13:52:01.594] [default-akka.test.stream-dispatcher-8] - // [akka.actor.ActorSystemImpl(default)] 1 - // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] - // [akka.actor.ActorSystemImpl(default)] 2 - // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] - // [akka.actor.ActorSystemImpl(default)] 3 - // [WARN] [12/10/2019 13:52:01.596] [default-akka.test.stream-dispatcher-8] + // [INFO] [12/10/2019 13:52:01.594] [default-pekko.test.stream-dispatcher-8] + // [pekko.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:52:01.595] [default-pekko.test.stream-dispatcher-8] + // [pekko.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:52:01.595] [default-pekko.test.stream-dispatcher-8] + // [pekko.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:52:01.596] [default-pekko.test.stream-dispatcher-8] // [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: // (RuntimeException: darn) // #restart-failure-inner-failure diff --git a/docs/src/test/java/jdocs/testkit/ParentChildTest.java b/docs/src/test/java/jdocs/testkit/ParentChildTest.java index eadb6cf7ae..997d8f4abf 100644 --- a/docs/src/test/java/jdocs/testkit/ParentChildTest.java +++ b/docs/src/test/java/jdocs/testkit/ParentChildTest.java @@ -21,7 +21,8 @@ public class ParentChildTest extends AbstractJavaTest { public static AkkaJUnitActorSystemResource actorSystemResource = new AkkaJUnitActorSystemResource( "TestKitDocTest", - ConfigFactory.parseString("akka.loggers = [org.apache.pekko.testkit.TestEventListener]")); + ConfigFactory.parseString( + "pekko.loggers = [org.apache.pekko.testkit.TestEventListener]")); private final ActorSystem system = actorSystemResource.getSystem(); diff --git a/docs/src/test/java/jdocs/testkit/TestKitDocTest.java b/docs/src/test/java/jdocs/testkit/TestKitDocTest.java index 94192da367..ffde2ced2f 100644 --- a/docs/src/test/java/jdocs/testkit/TestKitDocTest.java +++ b/docs/src/test/java/jdocs/testkit/TestKitDocTest.java @@ -44,7 +44,8 @@ public class TestKitDocTest extends AbstractJavaTest { public static AkkaJUnitActorSystemResource actorSystemResource = new AkkaJUnitActorSystemResource( "TestKitDocTest", - ConfigFactory.parseString("akka.loggers = [org.apache.pekko.testkit.TestEventListener]")); + ConfigFactory.parseString( + "pekko.loggers = [org.apache.pekko.testkit.TestEventListener]")); private final ActorSystem system = actorSystemResource.getSystem(); diff --git a/docs/src/test/resources/application.conf b/docs/src/test/resources/application.conf index adaa9d9bdc..cadf8a256b 100644 --- a/docs/src/test/resources/application.conf +++ b/docs/src/test/resources/application.conf @@ -1 +1 @@ -akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] +pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] diff --git a/docs/src/test/scala/docs/actor/ActorDocSpec.scala 
b/docs/src/test/scala/docs/actor/ActorDocSpec.scala index ad086ea1c6..8d939fa474 100644 --- a/docs/src/test/scala/docs/actor/ActorDocSpec.scala +++ b/docs/src/test/scala/docs/actor/ActorDocSpec.scala @@ -320,8 +320,8 @@ case class Register(user: User) //#immutable-message-definition class ActorDocSpec extends AkkaSpec(""" - akka.loglevel = INFO - akka.loggers = [] + pekko.loglevel = INFO + pekko.loggers = [] """) { "import context" in { diff --git a/docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala b/docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala index c09c0708e9..74810cd9b3 100644 --- a/docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala +++ b/docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala @@ -25,8 +25,8 @@ object FaultHandlingDocSample extends App { import Worker._ val config = ConfigFactory.parseString(""" - akka.loglevel = "DEBUG" - akka.actor.debug { + pekko.loglevel = "DEBUG" + pekko.actor.debug { receive = on lifecycle = on } diff --git a/docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala b/docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala index 5c9fe7b612..332eef4f34 100644 --- a/docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala +++ b/docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala @@ -99,7 +99,7 @@ object FaultHandlingDocSpec { // #child val testConf: Config = ConfigFactory.parseString(""" - akka { + pekko { loggers = ["org.apache.pekko.testkit.TestEventListener"] } """) @@ -117,7 +117,7 @@ class FaultHandlingDocSpec(_system: ActorSystem) ActorSystem( "FaultHandlingDocSpec", ConfigFactory.parseString(""" - akka { + pekko { loggers = ["org.apache.pekko.testkit.TestEventListener"] loglevel = "WARNING" } diff --git a/docs/src/test/scala/docs/actor/SchedulerDocSpec.scala b/docs/src/test/scala/docs/actor/SchedulerDocSpec.scala index 64e38fb8a8..d825fd317c 100644 --- a/docs/src/test/scala/docs/actor/SchedulerDocSpec.scala +++ b/docs/src/test/scala/docs/actor/SchedulerDocSpec.scala @@ 
-16,7 +16,7 @@ import scala.concurrent.duration._ import pekko.testkit._ -class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { +class SchedulerDocSpec extends AkkaSpec(Map("pekko.loglevel" -> "INFO")) { "schedule a one-off task" in { // #schedule-one-off-message // Use the system's dispatcher as ExecutionContext diff --git a/docs/src/test/scala/docs/cluster/ClusterDocSpec.scala b/docs/src/test/scala/docs/cluster/ClusterDocSpec.scala index d6ac8ba34b..aacf55e7ff 100644 --- a/docs/src/test/scala/docs/cluster/ClusterDocSpec.scala +++ b/docs/src/test/scala/docs/cluster/ClusterDocSpec.scala @@ -13,8 +13,8 @@ object ClusterDocSpec { val config = """ - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 """ } diff --git a/docs/src/test/scala/docs/cluster/FactorialBackend.scala b/docs/src/test/scala/docs/cluster/FactorialBackend.scala index 0fd8fd9b94..b7d784a166 100644 --- a/docs/src/test/scala/docs/cluster/FactorialBackend.scala +++ b/docs/src/test/scala/docs/cluster/FactorialBackend.scala @@ -43,8 +43,8 @@ object FactorialBackend { // Override the configuration of the port when specified as program argument val port = if (args.isEmpty) "0" else args(0) val config = ConfigFactory - .parseString(s"akka.remote.classic.netty.tcp.port=$port") - .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")) + .parseString(s"pekko.remote.classic.netty.tcp.port=$port") + .withFallback(ConfigFactory.parseString("pekko.cluster.roles = [backend]")) .withFallback(ConfigFactory.load("factorial")) val system = ActorSystem("ClusterSystem", config) diff --git a/docs/src/test/scala/docs/cluster/FactorialFrontend.scala b/docs/src/test/scala/docs/cluster/FactorialFrontend.scala index 4035e3bc75..6a1563c0fc 100644 --- a/docs/src/test/scala/docs/cluster/FactorialFrontend.scala +++ b/docs/src/test/scala/docs/cluster/FactorialFrontend.scala @@ -52,7 +52,7 @@ 
object FactorialFrontend { val upToN = 200 val config = - ConfigFactory.parseString("akka.cluster.roles = [frontend]").withFallback(ConfigFactory.load("factorial")) + ConfigFactory.parseString("pekko.cluster.roles = [frontend]").withFallback(ConfigFactory.load("factorial")) val system = ActorSystem("ClusterSystem", config) system.log.info("Factorials will start when 2 backend members in the cluster.") diff --git a/docs/src/test/scala/docs/cluster/TransformationBackend.scala b/docs/src/test/scala/docs/cluster/TransformationBackend.scala index 7a1d4db08c..75cec9a831 100644 --- a/docs/src/test/scala/docs/cluster/TransformationBackend.scala +++ b/docs/src/test/scala/docs/cluster/TransformationBackend.scala @@ -47,8 +47,8 @@ object TransformationBackend { // Override the configuration of the port when specified as program argument val port = if (args.isEmpty) "0" else args(0) val config = ConfigFactory - .parseString(s"akka.remote.classic.netty.tcp.port=$port") - .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")) + .parseString(s"pekko.remote.classic.netty.tcp.port=$port") + .withFallback(ConfigFactory.parseString("pekko.cluster.roles = [backend]")) .withFallback(ConfigFactory.load()) val system = ActorSystem("ClusterSystem", config) diff --git a/docs/src/test/scala/docs/cluster/TransformationFrontend.scala b/docs/src/test/scala/docs/cluster/TransformationFrontend.scala index f12b8cfcd1..04b8fe90ad 100644 --- a/docs/src/test/scala/docs/cluster/TransformationFrontend.scala +++ b/docs/src/test/scala/docs/cluster/TransformationFrontend.scala @@ -46,8 +46,8 @@ object TransformationFrontend { // Override the configuration of the port when specified as program argument val port = if (args.isEmpty) "0" else args(0) val config = ConfigFactory - .parseString(s"akka.remote.classic.netty.tcp.port=$port") - .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")) + .parseString(s"pekko.remote.classic.netty.tcp.port=$port") + 
.withFallback(ConfigFactory.parseString("pekko.cluster.roles = [frontend]")) .withFallback(ConfigFactory.load()) val system = ActorSystem("ClusterSystem", config) diff --git a/docs/src/test/scala/docs/config/ConfigDocSpec.scala b/docs/src/test/scala/docs/config/ConfigDocSpec.scala index 105e957570..4888b45af5 100644 --- a/docs/src/test/scala/docs/config/ConfigDocSpec.scala +++ b/docs/src/test/scala/docs/config/ConfigDocSpec.scala @@ -20,7 +20,7 @@ class ConfigDocSpec extends AnyWordSpec with Matchers { def compileOnlyCustomConfig(): Unit = { // #custom-config val customConf = ConfigFactory.parseString(""" - akka.log-config-on-start = on + pekko.log-config-on-start = on """) // ConfigFactory.load sandwiches customConfig between default reference // config and default overrides, and then resolves it. @@ -64,7 +64,7 @@ class ConfigDocSpec extends AnyWordSpec with Matchers { val conf = ConfigFactory.parseString(""" #//#deployment-section - akka.actor.deployment { + pekko.actor.deployment { # '/user/actorA/actorB' is a remote deployed actor /actorA/actorB { diff --git a/docs/src/test/scala/docs/coordination/LeaseDocSpec.scala b/docs/src/test/scala/docs/coordination/LeaseDocSpec.scala index 6f00c7817d..b2e19e3875 100644 --- a/docs/src/test/scala/docs/coordination/LeaseDocSpec.scala +++ b/docs/src/test/scala/docs/coordination/LeaseDocSpec.scala @@ -41,7 +41,7 @@ object LeaseDocSpec { ConfigFactory.parseString(""" jdocs-lease.lease-class = "jdocs.coordination.LeaseDocTest$SampleLease" #lease-config - akka.actor.provider = cluster + pekko.actor.provider = cluster docs-lease { lease-class = "docs.coordination.SampleLease" heartbeat-timeout = 100s diff --git a/docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala b/docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala index 806ad25b71..000bb7eccc 100644 --- a/docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala +++ b/docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala @@ -18,11 +18,11 @@ object 
DistributedDataDocSpec { val config = """ - akka.actor.provider = "cluster" - akka.remote.classic.netty.tcp.port = 0 + pekko.actor.provider = "cluster" + pekko.remote.classic.netty.tcp.port = 0 #//#serializer-config - akka.actor { + pekko.actor { serializers { two-phase-set = "docs.ddata.protobuf.TwoPhaseSetSerializer" } @@ -33,7 +33,7 @@ object DistributedDataDocSpec { #//#serializer-config #//#japi-serializer-config - akka.actor { + pekko.actor { serializers { twophaseset = "jdocs.ddata.protobuf.TwoPhaseSetSerializer" } diff --git a/docs/src/test/scala/docs/discovery/DnsDiscoveryDocSpec.scala b/docs/src/test/scala/docs/discovery/DnsDiscoveryDocSpec.scala index 6c950ec0ca..39755e16b2 100644 --- a/docs/src/test/scala/docs/discovery/DnsDiscoveryDocSpec.scala +++ b/docs/src/test/scala/docs/discovery/DnsDiscoveryDocSpec.scala @@ -13,9 +13,9 @@ import scala.concurrent.Future object DnsDiscoveryDocSpec { val config = ConfigFactory.parseString(""" // #configure-dns - akka { + pekko { discovery { - method = akka-dns + method = pekko-dns } } // #configure-dns @@ -25,7 +25,7 @@ object DnsDiscoveryDocSpec { class DnsDiscoveryDocSpec extends AkkaSpec(DnsDiscoveryDocSpec.config) { "DNS Discovery" should { - "find akka.io" in { + "find pekko.io" in { // #lookup-dns import org.apache.pekko import pekko.discovery.Discovery @@ -33,16 +33,16 @@ class DnsDiscoveryDocSpec extends AkkaSpec(DnsDiscoveryDocSpec.config) { val discovery: ServiceDiscovery = Discovery(system).discovery // ... 
- val result: Future[ServiceDiscovery.Resolved] = discovery.lookup("akka.io", resolveTimeout = 3.seconds) + val result: Future[ServiceDiscovery.Resolved] = discovery.lookup("pekko.io", resolveTimeout = 3.seconds) // #lookup-dns try { val resolved = result.futureValue - resolved.serviceName shouldBe "akka.io" + resolved.serviceName shouldBe "pekko.io" resolved.addresses shouldNot be(Symbol("empty")) } catch { case e: Exception => - info("Failed lookup akka.io, but ignoring: " + e) + info("Failed lookup pekko.io, but ignoring: " + e) pending } } diff --git a/docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala b/docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala index a643749917..6257bf692a 100644 --- a/docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala +++ b/docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala @@ -33,7 +33,7 @@ object DispatcherDocSpec { "jdocs.dispatcher.MyUnboundedMessageQueueSemantics" } - akka.actor.mailbox.requirements { + pekko.actor.mailbox.requirements { "jdocs.dispatcher.MyUnboundedMessageQueueSemantics" = custom-dispatcher-mailbox } @@ -167,7 +167,7 @@ object DispatcherDocSpec { //#prio-dispatcher-config //#dispatcher-deployment-config - akka.actor.deployment { + pekko.actor.deployment { /myactor { dispatcher = my-dispatcher } @@ -183,7 +183,7 @@ object DispatcherDocSpec { //#mailbox-deployment-config - akka.actor.deployment { + pekko.actor.deployment { /priomailboxactor { mailbox = prio-mailbox } @@ -199,7 +199,7 @@ object DispatcherDocSpec { //#required-mailbox-config - akka.actor.mailbox.requirements { + pekko.actor.mailbox.requirements { "org.apache.pekko.dispatch.BoundedMessageQueueSemantics" = bounded-mailbox } //#required-mailbox-config @@ -210,7 +210,7 @@ object DispatcherDocSpec { "docs.dispatcher.MyUnboundedMessageQueueSemantics" } - akka.actor.mailbox.requirements { + pekko.actor.mailbox.requirements { "docs.dispatcher.MyUnboundedMessageQueueSemantics" = custom-dispatcher-mailbox } diff --git 
a/docs/src/test/scala/docs/extension/ExtensionDocSpec.scala b/docs/src/test/scala/docs/extension/ExtensionDocSpec.scala index 20fd773eba..de2d65a52c 100644 --- a/docs/src/test/scala/docs/extension/ExtensionDocSpec.scala +++ b/docs/src/test/scala/docs/extension/ExtensionDocSpec.scala @@ -53,7 +53,7 @@ object ExtensionDocSpec { val config = """ //#config - akka { + pekko { extensions = ["docs.extension.CountExtension"] } //#config diff --git a/docs/src/test/scala/docs/future/FutureDocSpec.scala b/docs/src/test/scala/docs/future/FutureDocSpec.scala index be3efbf095..d452487cd4 100644 --- a/docs/src/test/scala/docs/future/FutureDocSpec.scala +++ b/docs/src/test/scala/docs/future/FutureDocSpec.scala @@ -41,7 +41,7 @@ object FutureDocSpec { // #pipe-to-usage class ActorUsingPipeTo(target: ActorRef) extends Actor { - // akka.pattern.pipe needs to be imported + // pekko.pattern.pipe needs to be imported import org.apache.pekko.pattern.{ ask, pipe } // implicit ExecutionContext should be in scope implicit val ec: ExecutionContext = context.dispatcher diff --git a/docs/src/test/scala/docs/io/EchoServer.scala b/docs/src/test/scala/docs/io/EchoServer.scala index 2133c11910..bc151be42f 100644 --- a/docs/src/test/scala/docs/io/EchoServer.scala +++ b/docs/src/test/scala/docs/io/EchoServer.scala @@ -15,7 +15,7 @@ import scala.io.StdIn object EchoServer extends App { - val config = ConfigFactory.parseString("akka.loglevel = DEBUG") + val config = ConfigFactory.parseString("pekko.loglevel = DEBUG") implicit val system: ActorSystem = ActorSystem("EchoServer", config) system.actorOf(Props(classOf[EchoManager], classOf[EchoHandler]), "echo") diff --git a/docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala b/docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala index 9f37a935f4..268de81ef1 100644 --- a/docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala @@ -20,10 +20,10 @@ object 
PersistenceDocSpec { val config = """ //#auto-update-interval - akka.persistence.view.auto-update-interval = 5s + pekko.persistence.view.auto-update-interval = 5s //#auto-update-interval //#auto-update - akka.persistence.view.auto-update = off + pekko.persistence.view.auto-update = off //#auto-update """ diff --git a/docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala b/docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala index 28797febca..cd5040dcc0 100644 --- a/docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala @@ -16,10 +16,10 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { def this() = this(""" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" //#event-adapters-config - akka.persistence.journal { + pekko.persistence.journal { inmem { event-adapters { tagging = "docs.persistence.MyTaggingEventAdapter" @@ -37,7 +37,7 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { //#event-adapters-config - akka.persistence.journal { + pekko.persistence.journal { auto-json-store { class = "org.apache.pekko.persistence.journal.inmem.InmemJournal" # reuse inmem, as an example @@ -72,7 +72,7 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { val props = Props(new PersistentActor { override def persistenceId: String = "json-actor" - override def journalPluginId: String = "akka.persistence.journal.auto-json-store" + override def journalPluginId: String = "pekko.persistence.journal.auto-json-store" override def receiveRecover: Receive = { case RecoveryCompleted => // ignore... 
@@ -107,7 +107,7 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { val props = Props(new PersistentActor { override def persistenceId: String = "json-actor" - override def journalPluginId: String = "akka.persistence.journal.manual-json-store" + override def journalPluginId: String = "pekko.persistence.journal.manual-json-store" override def receiveRecover: Receive = { case RecoveryCompleted => // ignore... diff --git a/docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala b/docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala index fa28a4fe0a..6ccf145cc7 100644 --- a/docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala @@ -11,9 +11,9 @@ object PersistenceMultiDocSpec { """ //#default-config # Absolute path to the default journal plugin configuration entry. - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" # Absolute path to the default snapshot store plugin configuration entry. - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" //#default-config """ @@ -28,14 +28,14 @@ object PersistenceMultiDocSpec { s""" //#override-config # Configuration entry for the custom journal plugin, see `journalPluginId`. - akka.persistence.chronicle.journal { + pekko.persistence.chronicle.journal { # Standard persistence extension property: provider FQCN. class = "org.apache.pekko.persistence.chronicle.ChronicleSyncJournal" # Custom setting specific for the journal `ChronicleSyncJournal`. folder = $${user.dir}/store/journal } # Configuration entry for the custom snapshot store plugin, see `snapshotPluginId`. 
- akka.persistence.chronicle.snapshot-store { + pekko.persistence.chronicle.snapshot-store { # Standard persistence extension property: provider FQCN. class = "org.apache.pekko.persistence.chronicle.ChronicleSnapshotStore" # Custom setting specific for the snapshot store `ChronicleSnapshotStore`. @@ -49,10 +49,10 @@ object PersistenceMultiDocSpec { override def persistenceId = "123" // Absolute path to the journal plugin configuration entry in the `reference.conf`. - override def journalPluginId = "akka.persistence.chronicle.journal" + override def journalPluginId = "pekko.persistence.chronicle.journal" // Absolute path to the snapshot store plugin configuration entry in the `reference.conf`. - override def snapshotPluginId = "akka.persistence.chronicle.snapshot-store" + override def snapshotPluginId = "pekko.persistence.chronicle.snapshot-store" } // #override-plugins diff --git a/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala b/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala index 18acc7783e..bf7adce50f 100644 --- a/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala @@ -27,30 +27,30 @@ object PersistencePluginDocSpec { """ //#leveldb-plugin-config # Path to the journal plugin to be used - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb" //#leveldb-plugin-config //#leveldb-snapshot-plugin-config # Path to the snapshot store plugin to be used - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" //#leveldb-snapshot-plugin-config //#max-message-batch-size - akka.persistence.journal.leveldb.max-message-batch-size = 200 + pekko.persistence.journal.leveldb.max-message-batch-size = 200 //#max-message-batch-size //#journal-config - 
akka.persistence.journal.leveldb.dir = "target/journal" + pekko.persistence.journal.leveldb.dir = "target/journal" //#journal-config //#snapshot-config - akka.persistence.snapshot-store.local.dir = "target/snapshots" + pekko.persistence.snapshot-store.local.dir = "target/snapshots" //#snapshot-config //#native-config - akka.persistence.journal.leveldb.native = off + pekko.persistence.journal.leveldb.native = off //#native-config //#compaction-intervals-config # Number of deleted messages per persistence id that will trigger journal compaction - akka.persistence.journal.leveldb.compaction-intervals { + pekko.persistence.journal.leveldb.compaction-intervals { persistence-id-1 = 100 persistence-id-2 = 200 # ... @@ -68,27 +68,27 @@ class PersistencePluginDocSpec extends AnyWordSpec { """ //#journal-plugin-config # Path to the journal plugin to be used - akka.persistence.journal.plugin = "my-journal" + pekko.persistence.journal.plugin = "my-journal" # My custom journal plugin my-journal { # Class name of the plugin. class = "docs.persistence.MyJournal" # Dispatcher for the plugin actor. - plugin-dispatcher = "akka.actor.default-dispatcher" + plugin-dispatcher = "pekko.actor.default-dispatcher" } //#journal-plugin-config //#snapshot-store-plugin-config # Path to the snapshot store plugin to be used - akka.persistence.snapshot-store.plugin = "my-snapshot-store" + pekko.persistence.snapshot-store.plugin = "my-snapshot-store" # My custom snapshot store plugin my-snapshot-store { # Class name of the plugin. class = "docs.persistence.MySnapshotStore" # Dispatcher for the plugin actor. 
- plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" + plugin-dispatcher = "pekko.persistence.dispatchers.default-plugin-dispatcher" } //#snapshot-store-plugin-config """ @@ -113,16 +113,16 @@ object SharedLeveldbPluginDocSpec { val config = """ //#shared-journal-config - akka.persistence.journal.plugin = "akka.persistence.journal.leveldb-shared" + pekko.persistence.journal.plugin = "pekko.persistence.journal.leveldb-shared" //#shared-journal-config //#shared-store-native-config - akka.persistence.journal.leveldb-shared.store.native = off + pekko.persistence.journal.leveldb-shared.store.native = off //#shared-store-native-config //#shared-store-config - akka.persistence.journal.leveldb-shared.store.dir = "target/shared" + pekko.persistence.journal.leveldb-shared.store.dir = "target/shared" //#shared-store-config //#event-adapter-config - akka.persistence.journal.leveldb-shared.adapter = "com.example.MyAdapter" + pekko.persistence.journal.leveldb-shared.adapter = "com.example.MyAdapter" //#event-adapter-config """ @@ -188,7 +188,7 @@ object PersistenceTCKDoc { // #journal-tck-scala class MyJournalSpec extends JournalSpec( - config = ConfigFactory.parseString("""akka.persistence.journal.plugin = "my.journal.plugin"""")) { + config = ConfigFactory.parseString("""pekko.persistence.journal.plugin = "my.journal.plugin"""")) { override def supportsRejectingNonSerializableObjects: CapabilityFlag = false // or CapabilityFlag.off @@ -205,7 +205,7 @@ object PersistenceTCKDoc { class MySnapshotStoreSpec extends SnapshotStoreSpec( config = ConfigFactory.parseString(""" - akka.persistence.snapshot-store.plugin = "my.snapshot-store.plugin" + pekko.persistence.snapshot-store.plugin = "my.snapshot-store.plugin" """)) { override def supportsSerialization: CapabilityFlag = @@ -222,15 +222,15 @@ object PersistenceTCKDoc { // #journal-tck-before-after-scala class MyJournalSpec extends JournalSpec(config = ConfigFactory.parseString(""" - 
akka.persistence.journal.plugin = "my.journal.plugin" + pekko.persistence.journal.plugin = "my.journal.plugin" """)) { override def supportsRejectingNonSerializableObjects: CapabilityFlag = true // or CapabilityFlag.on val storageLocations = List( - new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")), - new File(config.getString("akka.persistence.snapshot-store.local.dir"))) + new File(system.settings.config.getString("pekko.persistence.journal.leveldb.dir")), + new File(config.getString("pekko.persistence.snapshot-store.local.dir"))) override def beforeAll(): Unit = { super.beforeAll() diff --git a/docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala index b6faba678c..ad17830d99 100644 --- a/docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala @@ -23,7 +23,7 @@ class PersistenceSchemaEvolutionDocSpec extends AnyWordSpec { val customSerializerConfig = """ //#custom-serializer-config - akka.actor { + pekko.actor { serializers { my-payload = "docs.persistence.MyPayloadSerializer" my-snapshot = "docs.persistence.MySnapshotSerializer" @@ -215,7 +215,7 @@ class PersonSerializerSettingsBox { val PersonSerializerSettings = """ //#simplest-custom-serializer-config # application.conf - akka { + pekko { actor { serializers { person = "docs.persistence.SimplestPossiblePersonSerializer" diff --git a/docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala b/docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala index d51a3850b2..38108db5a1 100644 --- a/docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala @@ -17,7 +17,7 @@ class PersistenceSerializerDocSpec extends AnyWordSpec { val customSerializerConfig = """ 
//#custom-serializer-config - akka.actor { + pekko.actor { serializers { my-payload = "docs.persistence.MyPayloadSerializer" my-snapshot = "docs.persistence.MySnapshotSerializer" diff --git a/docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala b/docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala index ed5a30f46e..441c8a55bf 100644 --- a/docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala @@ -34,7 +34,7 @@ object LeveldbPersistenceQueryDocSpec { } class LeveldbPersistenceQueryDocSpec - extends AkkaSpec("akka.persistence.journal.plugin = akka.persistence.journal.leveldb") { + extends AkkaSpec("pekko.persistence.journal.plugin = pekko.persistence.journal.leveldb") { "LeveldbPersistentQuery" must { "demonstrate how get ReadJournal" in { diff --git a/docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala b/docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala index 405d81ef25..b61a2ef08b 100644 --- a/docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala +++ b/docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala @@ -144,7 +144,7 @@ object PersistenceQueryDocSpec { case class Record(any: Any) class ExampleStore { def save(record: Record) = Future.successful(42L) } - val JournalId = "akka.persistence.query.my-read-journal" + val JournalId = "pekko.persistence.query.my-read-journal" class X { @@ -179,7 +179,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { def this() = this(""" - akka.persistence.query.my-read-journal { + pekko.persistence.query.my-read-journal { class = "docs.persistence.query.PersistenceQueryDocSpec$MyReadJournalProvider" refresh-interval = 3s } @@ -189,7 +189,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { // #basic-usage // obtain read journal by plugin id val readJournal = 
- PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") + PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("pekko.persistence.query.my-read-journal") // issue query to journal val source: Source[EventEnvelope, NotUsed] = @@ -262,7 +262,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { class RunWithAsyncFunction { val readJournal = - PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") + PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("pekko.persistence.query.my-read-journal") // #projection-into-different-store-simple-classes trait ExampleStore { diff --git a/docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala b/docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala index 8285b06183..94f9266d2c 100644 --- a/docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala +++ b/docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala @@ -20,9 +20,9 @@ import scala.concurrent.duration._ //#imports class PersistenceInitSpec extends ScalaTestWithActorTestKit(s""" - akka.persistence.journal.plugin = "akka.persistence.journal.inmem" - akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - akka.persistence.snapshot-store.local.dir = "target/snapshot-${UUID.randomUUID().toString}" + pekko.persistence.journal.plugin = "pekko.persistence.journal.inmem" + pekko.persistence.snapshot-store.plugin = "pekko.persistence.snapshot-store.local" + pekko.persistence.snapshot-store.local.dir = "target/snapshot-${UUID.randomUUID().toString}" """) with AnyWordSpecLike { "PersistenceInit" should { diff --git a/docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala b/docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala index 2eb7ef2cba..5452c5e8a6 100644 --- a/docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala +++ 
b/docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala @@ -21,10 +21,10 @@ object RemoteDeploymentDocSpec { } class RemoteDeploymentDocSpec extends AkkaSpec(""" - akka.actor.provider = remote - akka.remote.classic.netty.tcp.port = 0 - akka.remote.artery.canonical.port = 0 - akka.remote.use-unsafe-remote-features-outside-cluster = on + pekko.actor.provider = remote + pekko.remote.classic.netty.tcp.port = 0 + pekko.remote.artery.canonical.port = 0 + pekko.remote.use-unsafe-remote-features-outside-cluster = on """) with ImplicitSender { import RemoteDeploymentDocSpec._ diff --git a/docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala b/docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala index 8a2e474280..aa4ec76389 100644 --- a/docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala +++ b/docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala @@ -18,7 +18,7 @@ object CustomRouterDocSpec { val config = """ #//#config -akka.actor.deployment { +pekko.actor.deployment { /redundancy2 { router = "jdocs.routing.RedundancyGroup" routees.paths = ["/user/s1", "/user/s2", "/user/s3"] @@ -30,7 +30,7 @@ akka.actor.deployment { val jconfig = """ #//#jconfig -akka.actor.deployment { +pekko.actor.deployment { /redundancy2 { router = "jdocs.routing.RedundancyGroup" routees.paths = ["/user/s1", "/user/s2", "/user/s3"] diff --git a/docs/src/test/scala/docs/routing/RouterDocSpec.scala b/docs/src/test/scala/docs/routing/RouterDocSpec.scala index c198f4f56f..5784ac4e16 100644 --- a/docs/src/test/scala/docs/routing/RouterDocSpec.scala +++ b/docs/src/test/scala/docs/routing/RouterDocSpec.scala @@ -30,7 +30,7 @@ object RouterDocSpec { val config = """ #//#config-round-robin-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router1 { router = round-robin-pool nr-of-instances = 5 @@ -39,7 +39,7 @@ akka.actor.deployment { #//#config-round-robin-pool #//#config-round-robin-group -akka.actor.deployment { +pekko.actor.deployment { /parent/router3 { 
router = round-robin-group routees.paths = ["/user/workers/w1", "/user/workers/w2", "/user/workers/w3"] @@ -48,7 +48,7 @@ akka.actor.deployment { #//#config-round-robin-group #//#config-random-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router5 { router = random-pool nr-of-instances = 5 @@ -57,7 +57,7 @@ akka.actor.deployment { #//#config-random-pool #//#config-random-group -akka.actor.deployment { +pekko.actor.deployment { /parent/router7 { router = random-group routees.paths = ["/user/workers/w1", "/user/workers/w2", "/user/workers/w3"] @@ -66,7 +66,7 @@ akka.actor.deployment { #//#config-random-group #//#config-balancing-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router9 { router = balancing-pool nr-of-instances = 5 @@ -75,7 +75,7 @@ akka.actor.deployment { #//#config-balancing-pool #//#config-balancing-pool2 -akka.actor.deployment { +pekko.actor.deployment { /parent/router9b { router = balancing-pool nr-of-instances = 5 @@ -87,7 +87,7 @@ akka.actor.deployment { #//#config-balancing-pool2 #//#config-balancing-pool3 -akka.actor.deployment { +pekko.actor.deployment { /parent/router10b { router = balancing-pool nr-of-instances = 5 @@ -105,7 +105,7 @@ akka.actor.deployment { #//#config-balancing-pool3 #//#config-balancing-pool4 -akka.actor.deployment { +pekko.actor.deployment { /parent/router10c { router = balancing-pool nr-of-instances = 5 @@ -117,7 +117,7 @@ akka.actor.deployment { #//#config-balancing-pool4 #//#config-smallest-mailbox-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router11 { router = smallest-mailbox-pool nr-of-instances = 5 @@ -126,7 +126,7 @@ akka.actor.deployment { #//#config-smallest-mailbox-pool #//#config-broadcast-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router13 { router = broadcast-pool nr-of-instances = 5 @@ -135,7 +135,7 @@ akka.actor.deployment { #//#config-broadcast-pool #//#config-broadcast-group -akka.actor.deployment { +pekko.actor.deployment { 
/parent/router15 { router = broadcast-group routees.paths = ["/user/workers/w1", "/user/workers/w2", "/user/workers/w3"] @@ -144,7 +144,7 @@ akka.actor.deployment { #//#config-broadcast-group #//#config-scatter-gather-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router17 { router = scatter-gather-pool nr-of-instances = 5 @@ -154,7 +154,7 @@ akka.actor.deployment { #//#config-scatter-gather-pool #//#config-scatter-gather-group -akka.actor.deployment { +pekko.actor.deployment { /parent/router19 { router = scatter-gather-group routees.paths = ["/user/workers/w1", "/user/workers/w2", "/user/workers/w3"] @@ -164,7 +164,7 @@ akka.actor.deployment { #//#config-scatter-gather-group #//#config-tail-chopping-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router21 { router = tail-chopping-pool nr-of-instances = 5 @@ -175,7 +175,7 @@ akka.actor.deployment { #//#config-tail-chopping-pool #//#config-tail-chopping-group -akka.actor.deployment { +pekko.actor.deployment { /parent/router23 { router = tail-chopping-group routees.paths = ["/user/workers/w1", "/user/workers/w2", "/user/workers/w3"] @@ -186,7 +186,7 @@ akka.actor.deployment { #//#config-tail-chopping-group #//#config-consistent-hashing-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router25 { router = consistent-hashing-pool nr-of-instances = 5 @@ -196,7 +196,7 @@ akka.actor.deployment { #//#config-consistent-hashing-pool #//#config-consistent-hashing-group -akka.actor.deployment { +pekko.actor.deployment { /parent/router27 { router = consistent-hashing-group routees.paths = ["/user/workers/w1", "/user/workers/w2", "/user/workers/w3"] @@ -206,7 +206,7 @@ akka.actor.deployment { #//#config-consistent-hashing-group #//#config-remote-round-robin-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/remotePool { router = round-robin-pool nr-of-instances = 10 @@ -216,7 +216,7 @@ akka.actor.deployment { #//#config-remote-round-robin-pool 
#//#config-remote-round-robin-pool-artery -akka.actor.deployment { +pekko.actor.deployment { /parent/remotePool { router = round-robin-pool nr-of-instances = 10 @@ -226,7 +226,7 @@ akka.actor.deployment { #//#config-remote-round-robin-pool-artery #//#config-remote-round-robin-group -akka.actor.deployment { +pekko.actor.deployment { /parent/remoteGroup { router = round-robin-group routees.paths = [ @@ -238,7 +238,7 @@ akka.actor.deployment { #//#config-remote-round-robin-group #//#config-remote-round-robin-group-artery -akka.actor.deployment { +pekko.actor.deployment { /parent/remoteGroup2 { router = round-robin-group routees.paths = [ @@ -250,7 +250,7 @@ akka.actor.deployment { #//#config-remote-round-robin-group-artery #//#config-resize-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router29 { router = round-robin-pool resizer { @@ -263,7 +263,7 @@ akka.actor.deployment { #//#config-resize-pool #//#config-optimal-size-exploring-resize-pool -akka.actor.deployment { +pekko.actor.deployment { /parent/router31 { router = round-robin-pool optimal-size-exploring-resizer { @@ -276,7 +276,7 @@ akka.actor.deployment { #//#config-optimal-size-exploring-resize-pool #//#config-pool-dispatcher -akka.actor.deployment { +pekko.actor.deployment { /poolWithDispatcher { router = random-pool nr-of-instances = 5 diff --git a/docs/src/test/scala/docs/serialization/SerializationDocSpec.scala b/docs/src/test/scala/docs/serialization/SerializationDocSpec.scala index 2d6a46efc0..1f7b51265e 100644 --- a/docs/src/test/scala/docs/serialization/SerializationDocSpec.scala +++ b/docs/src/test/scala/docs/serialization/SerializationDocSpec.scala @@ -114,7 +114,7 @@ package docs.serialization { val config = """ #//#serialization-identifiers-config - akka { + pekko { actor { serialization-identifiers { "docs.serialization.MyOwnSerializer" = 1234567 @@ -129,7 +129,7 @@ package docs.serialization { "demonstrate configuration of serialize messages" in { val config = 
ConfigFactory.parseString(""" #//#serialize-messages-config - akka { + pekko { actor { serialize-messages = on } @@ -144,7 +144,7 @@ package docs.serialization { "demonstrate configuration of serialize creators" in { val config = ConfigFactory.parseString(""" #//#serialize-creators-config - akka { + pekko { actor { serialize-creators = on } @@ -159,7 +159,7 @@ package docs.serialization { "demonstrate configuration of serializers" in { val config = ConfigFactory.parseString(""" #//#serialize-serializers-config - akka { + pekko { actor { serializers { jackson-json = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" @@ -178,7 +178,7 @@ package docs.serialization { "demonstrate configuration of serialization-bindings" in { val config = ConfigFactory.parseString(""" #//#serialization-bindings-config - akka { + pekko { actor { serializers { jackson-json = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer" diff --git a/docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala b/docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala index f6e2ebb594..3cd8d46cc6 100644 --- a/docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala +++ b/docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala @@ -11,7 +11,7 @@ import org.apache.pekko.stream.scaladsl._ import org.apache.pekko.testkit.{ AkkaSpec, EventFilter } import scala.concurrent.ExecutionContext -class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { +class GraphStageLoggingDocSpec extends AkkaSpec("pekko.loglevel = DEBUG") { implicit val ec: ExecutionContext = system.dispatcher diff --git a/docs/src/test/scala/docs/stream/IntegrationDocSpec.scala b/docs/src/test/scala/docs/stream/IntegrationDocSpec.scala index d0dcef8772..10e4fc7947 100644 --- a/docs/src/test/scala/docs/stream/IntegrationDocSpec.scala +++ b/docs/src/test/scala/docs/stream/IntegrationDocSpec.scala @@ -38,7 +38,7 @@ object IntegrationDocSpec { } #//#blocking-dispatcher-config - 
akka.actor.default-mailbox.mailbox-type = org.apache.pekko.dispatch.UnboundedMailbox + pekko.actor.default-mailbox.mailbox-type = org.apache.pekko.dispatch.UnboundedMailbox """) class AddressSystem { diff --git a/docs/src/test/scala/docs/stream/operators/SourceOperators.scala b/docs/src/test/scala/docs/stream/operators/SourceOperators.scala index 4a2854191a..b0ee028b4f 100644 --- a/docs/src/test/scala/docs/stream/operators/SourceOperators.scala +++ b/docs/src/test/scala/docs/stream/operators/SourceOperators.scala @@ -70,7 +70,7 @@ object SourceOperators { val source: Source[String, ActorRef] = Source.actorRefWithBackpressure[String]( ackMessage = "ack", - // complete when we send akka.actor.status.Success + // complete when we send pekko.actor.status.Success completionMatcher = { case _: Success => CompletionStrategy.immediately }, diff --git a/docs/src/test/scala/docs/stream/operators/source/Restart.scala b/docs/src/test/scala/docs/stream/operators/source/Restart.scala index 4f3e5be323..c10a4b0d0c 100644 --- a/docs/src/test/scala/docs/stream/operators/source/Restart.scala +++ b/docs/src/test/scala/docs/stream/operators/source/Restart.scala @@ -34,20 +34,20 @@ object Restart extends App { RestartSettings(minBackoff = 1.second, maxBackoff = 10.seconds, randomFactor = 0.1))(() => flakySource) forever.runWith(Sink.foreach(nr => system.log.info("{}", nr()))) // logs - // [INFO] [12/10/2019 13:51:58.300] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 1 - // [INFO] [12/10/2019 13:51:58.301] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 2 - // [INFO] [12/10/2019 13:51:58.302] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 3 - // [WARN] [12/10/2019 13:51:58.310] [default-akka.test.stream-dispatcher-7] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // [INFO] [12/10/2019 13:51:58.300] [default-pekko.test.stream-dispatcher-7] [pekko.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:58.301] [default-pekko.test.stream-dispatcher-7] [pekko.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:58.302] [default-pekko.test.stream-dispatcher-7] [pekko.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:58.310] [default-pekko.test.stream-dispatcher-7] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) // --> 1 second gap - // [INFO] [12/10/2019 13:51:59.379] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 - // [INFO] [12/10/2019 13:51:59.382] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 - // [INFO] [12/10/2019 13:51:59.383] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 - // [WARN] [12/10/2019 13:51:59.386] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // [INFO] [12/10/2019 13:51:59.379] [default-pekko.test.stream-dispatcher-8] [pekko.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:59.382] [default-pekko.test.stream-dispatcher-8] [pekko.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:59.383] [default-pekko.test.stream-dispatcher-8] [pekko.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:59.386] [default-pekko.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) // --> 2 second gap - // [INFO] [12/10/2019 13:52:01.594] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 - // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 - // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 - // [WARN] [12/10/2019 13:52:01.596] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // [INFO] [12/10/2019 13:52:01.594] [default-pekko.test.stream-dispatcher-8] [pekko.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:52:01.595] [default-pekko.test.stream-dispatcher-8] [pekko.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:52:01.595] [default-pekko.test.stream-dispatcher-8] [pekko.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:52:01.596] [default-pekko.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) // #restart-failure-inner-failure } diff --git a/docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala b/docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala index e68f565d8f..f2e4ced17f 100644 --- a/docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala +++ b/docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala @@ -109,7 +109,7 @@ class TestKitUsageSpec object TestKitUsageSpec { // Define your test specific configuration here val config = """ - akka { + pekko { loglevel = "WARNING" } """ diff --git a/docs/src/test/scala/docs/testkit/TestkitDocSpec.scala b/docs/src/test/scala/docs/testkit/TestkitDocSpec.scala index 12c4dbe657..df34b35d67 100644 --- a/docs/src/test/scala/docs/testkit/TestkitDocSpec.scala +++ b/docs/src/test/scala/docs/testkit/TestkitDocSpec.scala @@ -327,7 +327,7 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { implicit val system: ActorSystem = ActorSystem( "testsystem", ConfigFactory.parseString(""" - akka.loggers = ["org.apache.pekko.testkit.TestEventListener"] + pekko.loggers = ["org.apache.pekko.testkit.TestEventListener"] """)) try { val actor = system.actorOf(Props.empty) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 2b2ea3b5c0..460d36f25c 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -23,7 +23,7 @@ object AkkaBuild { // CI is the env var defined by Github Actions and Travis: // - https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables // - https://docs.travis-ci.com/user/environment-variables/#default-environment-variables - val runningOnCi: CliOption[Boolean] = CliOption("akka.ci-server", sys.env.contains("CI")) + val runningOnCi: CliOption[Boolean] = CliOption("pekko.ci-server", sys.env.contains("CI")) } val enableMiMa = true @@ -37,12 +37,12 @@ object AkkaBuild { UnidocRoot.akkaSettings, Protobuf.settings, GlobalScope / 
parallelExecution := System - .getProperty("akka.parallelExecution", parallelExecutionByDefault.toString) + .getProperty("pekko.parallelExecution", parallelExecutionByDefault.toString) .toBoolean, // used for linking to API docs (overwrites `project-info.version`) ThisBuild / projectInfoVersion := { if (isSnapshot.value) "snapshot" else version.value }) - lazy val mayChangeSettings = Seq(description := """|This module of Akka is marked as + lazy val mayChangeSettings = Seq(description := """|This module of Pekko is marked as |'may change', which means that it is in early |access mode, which also means that it is not covered |by commercial support. An module marked 'may change' doesn't @@ -55,7 +55,7 @@ object AkkaBuild { |""".stripMargin) val (mavenLocalResolver, mavenLocalResolverSettings) = - System.getProperty("akka.build.M2Dir") match { + System.getProperty("pekko.build.M2Dir") match { case null => (Resolver.mavenLocal, Seq.empty) case path => // Maven resolver settings @@ -81,17 +81,17 @@ object AkkaBuild { lazy val resolverSettings = Def.settings( // should we be allowed to use artifacts published to the local maven repository - if (System.getProperty("akka.build.useLocalMavenResolver", "false").toBoolean) + if (System.getProperty("pekko.build.useLocalMavenResolver", "false").toBoolean) resolvers += mavenLocalResolver else Seq.empty, // should we be allowed to use artifacts from sonatype snapshots - if (System.getProperty("akka.build.useSnapshotSonatypeResolver", "false").toBoolean) + if (System.getProperty("pekko.build.useSnapshotSonatypeResolver", "false").toBoolean) resolvers ++= Resolver.sonatypeOssRepos("snapshots") else Seq.empty, pomIncludeRepository := (_ => false) // do not leak internal repositories during staging ) - private def allWarnings: Boolean = System.getProperty("akka.allwarnings", "false").toBoolean + private def allWarnings: Boolean = System.getProperty("pekko.allwarnings", "false").toBoolean final val DefaultScalacOptions = Def.setting { 
if (scalaVersion.value.startsWith("3.")) { @@ -177,8 +177,8 @@ object AkkaBuild { |import com.typesafe.config.ConfigFactory |import scala.concurrent.duration._ |import org.apache.pekko.util.Timeout - |var config = ConfigFactory.parseString("akka.stdout-loglevel=INFO,akka.loglevel=DEBUG,pinned{type=PinnedDispatcher,executor=thread-pool-executor,throughput=1000}") - |var remoteConfig = ConfigFactory.parseString("akka.remote.classic.netty{port=0,use-dispatcher-for-io=akka.actor.default-dispatcher,execution-pool-size=0},akka.actor.provider=remote").withFallback(config) + |var config = ConfigFactory.parseString("pekko.stdout-loglevel=INFO,pekko.loglevel=DEBUG,pinned{type=PinnedDispatcher,executor=thread-pool-executor,throughput=1000}") + |var remoteConfig = ConfigFactory.parseString("pekko.remote.classic.netty{port=0,use-dispatcher-for-io=pekko.actor.default-dispatcher,execution-pool-size=0},pekko.actor.provider=remote").withFallback(config) |var system: ActorSystem = null |implicit def _system: ActorSystem = system |def startSystem(remoting: Boolean = false) = { system = ActorSystem("repl", if(remoting) remoteConfig else config); println("don’t forget to system.terminate()!") } @@ -216,16 +216,16 @@ object AkkaBuild { .getOrElse(Nil) ++ JdkOptions.versionSpecificJavaOptions }, - // all system properties passed to sbt prefixed with "akka." or "aeron." will be passed on to the forked jvms as is + // all system properties passed to sbt prefixed with "pekko." or "aeron." 
will be passed on to the forked jvms as is Test / javaOptions := { val base = (Test / javaOptions).value - val knownPrefix = Set("akka.", "aeron.") - val akkaSysProps: Seq[String] = + val knownPrefix = Set("pekko.", "aeron.") + val pekkoSysProps: Seq[String] = sys.props.iterator.collect { case (key, value) if knownPrefix.exists(pre => key.startsWith(pre)) => s"-D$key=$value" }.toList - base ++ akkaSysProps + base ++ pekkoSysProps }, // with forked tests the working directory is set to each module's home directory // rather than the Akka root, some tests depend on Akka root being working dir, so reset @@ -244,9 +244,9 @@ object AkkaBuild { } }, Test / parallelExecution := System - .getProperty("akka.parallelExecution", parallelExecutionByDefault.toString) + .getProperty("pekko.parallelExecution", parallelExecutionByDefault.toString) .toBoolean, - Test / logBuffered := System.getProperty("akka.logBufferedTests", "false").toBoolean, + Test / logBuffered := System.getProperty("pekko.logBufferedTests", "false").toBoolean, // show full stack traces and test case durations Test / testOptions += Tests.Argument("-oDF"), mavenLocalResolverSettings, diff --git a/project/MultiNode.scala b/project/MultiNode.scala index 081e446728..8c0e9ba392 100644 --- a/project/MultiNode.scala +++ b/project/MultiNode.scala @@ -23,8 +23,8 @@ object MultiNode extends AutoPlugin { import autoImport._ // MultiJvm tests can be excluded from normal test target an validatePullRequest - // with -Dakka.test.multi-in-test=false - val multiNodeTestInTest: Boolean = sys.props.getOrElse("akka.test.multi-in-test", "true").toBoolean + // with -Dpekko.test.multi-in-test=false + val multiNodeTestInTest: Boolean = sys.props.getOrElse("pekko.test.multi-in-test", "true").toBoolean object CliOptions { val multiNode = CliOption("akka.test.multi-node", false) @@ -59,7 +59,7 @@ object MultiNode extends AutoPlugin { case key: String if knownPrefix.exists(pre => key.startsWith(pre)) => "-D" + key + "=" + 
System.getProperty(key) } - "-Xmx256m" :: akkaProperties ::: CliOptions.sbtLogNoFormat.ifTrue("-Dakka.test.nocolor=true").toList + "-Xmx256m" :: akkaProperties ::: CliOptions.sbtLogNoFormat.ifTrue("-Dpekko.test.nocolor=true").toList } ++ JdkOptions.versionSpecificJavaOptions private val anyConfigsInThisProject = ScopeFilter(configurations = inAnyConfiguration) @@ -135,7 +135,7 @@ object MultiNodeScalaTest extends AutoPlugin { Seq( MultiJvm / extraOptions := { val src = (MultiJvm / sourceDirectory).value - (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq + (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dpekko.config=" + _.absolutePath).toSeq }, MultiJvm / scalatestOptions := { Seq("-C", "org.scalatest.extra.QuietReporter") ++ diff --git a/project/Paradox.scala b/project/Paradox.scala index 2ed6842b89..96dafb006d 100644 --- a/project/Paradox.scala +++ b/project/Paradox.scala @@ -28,13 +28,13 @@ object Paradox { "extref.samples.base_url" -> "https://developer.lightbend.com/start/?group=akka&project=%s", "extref.ecs.base_url" -> "https://example.lightbend.com/v1/download/%s", "scaladoc.pekko.base_url" -> "https://doc.akka.io/api/akka/2.6/org/apache", - "scaladoc.pekko.http.base_url" -> "https://doc.akka.io/api/akka-http/current/org/apache", + "scaladoc.akka.http.base_url" -> "https://doc.akka.io/api/akka-http/current/org/apache", "javadoc.java.base_url" -> "https://docs.oracle.com/en/java/javase/11/docs/api/java.base/", "javadoc.java.link_style" -> "direct", "javadoc.pekko.base_url" -> "https://doc.akka.io/japi/akka/2.6/org/apache", "javadoc.pekko.link_style" -> "direct", - "javadoc.pekko.http.base_url" -> "https://doc.akka.io/japi/akka-http/current/org/apache", - "javadoc.pekko.http.link_style" -> "frames", + "javadoc.akka.http.base_url" -> "https://doc.akka.io/japi/akka-http/current/org/apache", + "javadoc.akka.http.link_style" -> "frames", "javadoc.com.fasterxml.jackson.annotation.base_url" -> 
"https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-annotations/latest/", "javadoc.com.fasterxml.jackson.annotation.link_style" -> "direct", "javadoc.com.fasterxml.jackson.databind.base_url" -> "https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-databind/latest/", @@ -47,7 +47,7 @@ object Paradox { "javadoc.org.slf4j.link_style" -> "direct", "scala.version" -> scalaVersion.value, "scala.binary.version" -> scalaBinaryVersion.value, - "akka.version" -> version.value, + "pekko.version" -> version.value, "scalatest.version" -> Dependencies.scalaTestVersion.value, "sigar_loader.version" -> "1.6.6-rev002", "algolia.docsearch.api_key" -> "543bad5ad786495d9ccd445ed34ed082", diff --git a/project/Publish.scala b/project/Publish.scala index 88f4e38a6a..20ab56c52e 100644 --- a/project/Publish.scala +++ b/project/Publish.scala @@ -38,7 +38,7 @@ object Publish extends AutoPlugin { private def akkaPublishTo = Def.setting { val key = new java.io.File( - Option(System.getProperty("akka.gustav.key")) + Option(System.getProperty("pekko.gustav.key")) .getOrElse(System.getProperty("user.home") + "/.ssh/id_rsa_gustav.pem")) if (isSnapshot.value) Resolver.sftp("Akka snapshots", "gustav.akka.io", "/home/akkarepo/www/snapshots").as("akkarepo", key) @@ -47,7 +47,7 @@ object Publish extends AutoPlugin { } private def akkaCredentials: Seq[Credentials] = - Option(System.getProperty("akka.publish.credentials")).map(f => Credentials(new File(f))).toSeq + Option(System.getProperty("pekko.publish.credentials")).map(f => Credentials(new File(f))).toSeq } /** diff --git a/project/ScalafixSupport.scala b/project/ScalafixSupport.scala index b32199b8d5..7f14b58a23 100644 --- a/project/ScalafixSupport.scala +++ b/project/ScalafixSupport.scala @@ -49,5 +49,5 @@ trait ScalafixSupport { } object ScalafixSupport { - def fixTestScope: Boolean = System.getProperty("akka.scalafix.fixTestScope", "false").toBoolean + def fixTestScope: Boolean = System.getProperty("pekko.scalafix.fixTestScope", 
"false").toBoolean } diff --git a/project/TestExtras.scala b/project/TestExtras.scala index 2e8fd23f4c..66341d8c15 100644 --- a/project/TestExtras.scala +++ b/project/TestExtras.scala @@ -12,11 +12,11 @@ object TestExtras { object Filter { object Keys { val excludeTestNames = settingKey[Set[String]]( - "Names of tests to be excluded. Not supported by MultiJVM tests. Example usage: -Dakka.test.names.exclude=TimingSpec") + "Names of tests to be excluded. Not supported by MultiJVM tests. Example usage: -Dpekko.test.names.exclude=TimingSpec") val excludeTestTags = settingKey[Set[String]]( - "Tags of tests to be excluded. It will not be used if you specify -Dakka.test.tags.only. Example usage: -Dakka.test.tags.exclude=long-running") + "Tags of tests to be excluded. It will not be used if you specify -Dpekko.test.tags.only. Example usage: -Dpekko.test.tags.exclude=long-running") val onlyTestTags = - settingKey[Set[String]]("Tags of tests to be ran. Example usage: -Dakka.test.tags.only=long-running") + settingKey[Set[String]]("Tags of tests to be ran. Example usage: -Dpekko.test.tags.only=long-running") val checkTestsHaveRun = taskKey[Unit]("Verify a number of notable tests have actually run"); } diff --git a/project/VersionGenerator.scala b/project/VersionGenerator.scala index c69d41c4f1..84744e2ac6 100644 --- a/project/VersionGenerator.scala +++ b/project/VersionGenerator.scala @@ -8,20 +8,24 @@ import sbt._ import sbt.Keys._ /** - * Generate version.conf and akka/Version.scala files based on the version setting. + * Generate version.conf and org/apache/pekko/Version.scala files based on the version setting. 
*/ object VersionGenerator { val settings: Seq[Setting[_]] = inConfig(Compile)( Seq( - resourceGenerators += generateVersion(resourceManaged, _ / "version.conf", """|akka.version = "%s" - |"""), - sourceGenerators += generateVersion(sourceManaged, _ / "akka" / "Version.scala", """|package org.apache.pekko - | - |object Version { - | val current: String = "%s" - |} - |"""))) + resourceGenerators += generateVersion( + resourceManaged, _ / "version.conf", + """|pekko.version = "%s" + |"""), + sourceGenerators += generateVersion( + sourceManaged, _ / "org" / "apache" / "pekko" / "Version.scala", + """|package org.apache.pekko + | + |object Version { + | val current: String = "%s" + |} + |"""))) def generateVersion(dir: SettingKey[File], locate: File => File, template: String) = Def.task[Seq[File]] { val file = locate(dir.value)