chore: Use scala.jdk.DurationConverters (#2205)

He-Pin (kerr) authored on 2025-09-20 01:05:11 +08:00, committed by GitHub
parent 875840d3b8
commit 9b2853c730
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
84 changed files with 475 additions and 543 deletions

View file

@ -552,16 +552,16 @@ Scala has proven the most viable way to do it, as long as you keep the following
#### Overview of Scala types and their Java counterparts
| Scala | Java |
|-------|------|
| `scala.Option[T]` | `java.util.Optional<T>` (`OptionalDouble`, ...) |
| `scala.collection.immutable.Seq[T]` | `java.util.List<T>` |
| `scala.concurrent.Future[T]` | `java.util.concurrent.CompletionStage<T>` |
| `scala.concurrent.Promise[T]` | `java.util.concurrent.CompletableFuture<T>` |
| `scala.concurrent.duration.FiniteDuration` | `java.time.Duration` (use `org.apache.pekko.util.JavaDurationConverters`) |
| `T => Unit` | `java.util.function.Consumer<T>` |
| `() => R` (`scala.Function0[R]`) | `java.util.function.Supplier<R>` |
| `T => R` (`scala.Function1[T, R]`) | `java.util.function.Function<T, R>` |
| Scala | Java |
|-------|----------------------------------------------------------------|
| `scala.Option[T]` | `java.util.Optional<T>` (`OptionalDouble`, ...) |
| `scala.collection.immutable.Seq[T]` | `java.util.List<T>` |
| `scala.concurrent.Future[T]` | `java.util.concurrent.CompletionStage<T>` |
| `scala.concurrent.Promise[T]` | `java.util.concurrent.CompletableFuture<T>` |
| `scala.concurrent.duration.FiniteDuration` | `java.time.Duration` (use `scala.jdk.javaapi.DurationConverters`) |
| `T => Unit` | `java.util.function.Consumer<T>` |
| `() => R` (`scala.Function0[R]`) | `java.util.function.Supplier<R>` |
| `T => R` (`scala.Function1[T, R]`) | `java.util.function.Function<T, R>` |
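
Not part of the commit, but as a quick illustration of the updated table row: a minimal sketch of the two standard-library entry points, the extension methods in `scala.jdk.DurationConverters` for Scala code and the explicit `scala.jdk.javaapi.DurationConverters` methods aimed at Java callers (object and value names below are made up).

```scala
import java.time.{ Duration => JDuration }
import scala.concurrent.duration._
import scala.jdk.DurationConverters._                          // .toJava / .toScala extension methods
import scala.jdk.javaapi.{ DurationConverters => JConverters } // explicit, Java-friendly API

object DurationConversionSketch extends App {
  val scalaSide: FiniteDuration = 5.seconds
  val javaSide: JDuration       = JDuration.ofSeconds(5)

  // Scala-friendly style via extension methods
  assert(scalaSide.toJava == javaSide)
  assert(javaSide.toScala == scalaSide)

  // Java-friendly style via plain methods, also usable from Scala
  assert(JConverters.toJava(scalaSide) == javaSide)
  assert(JConverters.toScala(javaSide) == scalaSide)
}
```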
### Contributing new Pekko Streams operators

View file

@ -19,7 +19,7 @@ import org.apache.pekko
import pekko.actor.typed.{ ActorRef, Behavior, Props }
import pekko.annotation.{ DoNotInherit, InternalApi }
import pekko.util.FunctionConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.unused
/**
@ -198,7 +198,7 @@ object Effect {
/**
* Java API
*/
def duration(): java.time.Duration = d.asJava
def duration(): java.time.Duration = d.toJava
}
case object ReceiveTimeoutCancelled extends ReceiveTimeoutCancelled
@ -210,7 +210,7 @@ object Effect {
* FIXME what about events scheduled through the scheduler?
*/
final case class Scheduled[U](delay: FiniteDuration, target: ActorRef[U], message: U) extends Effect {
def duration(): java.time.Duration = delay.asJava
def duration(): java.time.Duration = delay.toJava
}
final case class TimerScheduled[U](
@ -220,11 +220,11 @@ object Effect {
mode: TimerScheduled.TimerMode,
overriding: Boolean)(val send: () => Unit)
extends Effect {
def duration(): java.time.Duration = delay.asJava
def duration(): java.time.Duration = delay.toJava
}
object TimerScheduled {
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
sealed trait TimerMode
case object FixedRateMode extends TimerMode
@ -235,9 +235,9 @@ object Effect {
/*Java API*/
def fixedRateMode = FixedRateMode
def fixedRateMode(initialDelay: java.time.Duration) = FixedRateModeWithInitialDelay(initialDelay.asScala)
def fixedRateMode(initialDelay: java.time.Duration) = FixedRateModeWithInitialDelay(initialDelay.toScala)
def fixedDelayMode = FixedDelayMode
def fixedDelayMode(initialDelay: java.time.Duration) = FixedDelayModeWithInitialDelay(initialDelay.asScala)
def fixedDelayMode(initialDelay: java.time.Duration) = FixedDelayModeWithInitialDelay(initialDelay.toScala)
def singleMode = SingleMode
}

View file

@ -21,7 +21,7 @@ import org.apache.pekko
import pekko.actor.typed.ActorSystem
import pekko.actor.typed.Extension
import pekko.actor.typed.ExtensionId
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
object TestKitSettings {
@ -97,5 +97,5 @@ final class TestKitSettings(val config: Config) {
* Java API: Scale the `duration` with the configured `TestTimeFactor`
*/
def dilated(duration: java.time.Duration): java.time.Duration =
dilated(duration.asScala).asJava
dilated(duration.toScala).toJava
}

View file

@ -43,7 +43,7 @@ import pekko.actor.typed.scaladsl.Behaviors
import pekko.annotation.InternalApi
import pekko.japi.function.Creator
import pekko.util.BoxedType
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.PrettyDuration._
import pekko.util.ccompat.JavaConverters._
@ -102,14 +102,14 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
override def remainingOrDefault: FiniteDuration = remainingOr(settings.SingleExpectDefaultTimeout)
override def getRemainingOrDefault: JDuration = remainingOrDefault.asJava
override def getRemainingOrDefault: JDuration = remainingOrDefault.toJava
override def remaining: FiniteDuration = end match {
case f: FiniteDuration => f - now
case _ => assertFail("`remaining` may not be called outside of `within`")
}
override def getRemaining: JDuration = remaining.asJava
override def getRemaining: JDuration = remaining.toJava
override def remainingOr(duration: FiniteDuration): FiniteDuration = end match {
case x if x eq Duration.Undefined => duration
@ -119,7 +119,7 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
}
override def getRemainingOr(duration: JDuration): JDuration =
remainingOr(duration.asScala).asJava
remainingOr(duration.toScala).toJava
override def within[T](min: FiniteDuration, max: FiniteDuration)(f: => T): T =
within_internal(min, max.dilated, f)
@ -128,10 +128,10 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
within_internal(Duration.Zero, max.dilated, f)
override def within[T](min: JDuration, max: JDuration)(f: Supplier[T]): T =
within_internal(min.asScala, max.asScala.dilated, f.get())
within_internal(min.toScala, max.toScala.dilated, f.get())
def within[T](max: JDuration)(f: Supplier[T]): T =
within_internal(Duration.Zero, max.asScala.dilated, f.get())
within_internal(Duration.Zero, max.toScala.dilated, f.get())
private def within_internal[T](min: FiniteDuration, max: FiniteDuration, f: => T): T = {
val start = now
@ -162,13 +162,13 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
override def expectMessage[T <: M](max: FiniteDuration, obj: T): T = expectMessage_internal(max.dilated, obj)
override def expectMessage[T <: M](max: JDuration, obj: T): T =
expectMessage(max.asScala, obj)
expectMessage(max.toScala, obj)
override def expectMessage[T <: M](max: FiniteDuration, hint: String, obj: T): T =
expectMessage_internal(max.dilated, obj, Some(hint))
override def expectMessage[T <: M](max: JDuration, hint: String, obj: T): T =
expectMessage(max.asScala, hint, obj)
expectMessage(max.toScala, hint, obj)
private def expectMessage_internal[T <: M](max: FiniteDuration, obj: T, hint: Option[String] = None): T = {
if (obj.isInstanceOf[Signal])
@ -185,7 +185,7 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
override def receiveMessage(): M = receiveMessage_internal(remainingOrDefault)
override def receiveMessage(max: JDuration): M = receiveMessage(max.asScala)
override def receiveMessage(max: JDuration): M = receiveMessage(max.toScala)
override def receiveMessage(max: FiniteDuration): M = receiveMessage_internal(max.dilated)
@ -212,7 +212,7 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
expectNoMessage_internal(max)
override def expectNoMessage(max: JDuration): Unit =
expectNoMessage(max.asScala)
expectNoMessage(max.toScala)
override def expectNoMessage(): Unit =
expectNoMessage_internal(settings.ExpectNoMessageDefaultTimeout)
@ -232,10 +232,10 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
expectMessageClass_internal(max.dilated, t.runtimeClass.asInstanceOf[Class[T]])
override def expectMessageClass[T <: M](clazz: Class[T]): T =
expectMessageClass_internal(getRemainingOrDefault.asScala, clazz)
expectMessageClass_internal(getRemainingOrDefault.toScala, clazz)
override def expectMessageClass[T <: M](clazz: Class[T], max: JDuration): T =
expectMessageClass_internal(max.asScala.dilated, clazz)
expectMessageClass_internal(max.toScala.dilated, clazz)
private def expectMessageClass_internal[C](max: FiniteDuration, c: Class[C]): C = {
if (classOf[Signal].isAssignableFrom(c)) {
@ -258,10 +258,10 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
receiveMessages_internal(n, max.dilated)
override def receiveSeveralMessages(n: Int): JList[M] =
receiveMessages_internal(n, getRemainingOrDefault.asScala).asJava
receiveMessages_internal(n, getRemainingOrDefault.toScala).asJava
override def receiveSeveralMessages(n: Int, max: JDuration): JList[M] =
receiveMessages_internal(n, max.asScala.dilated).asJava
receiveMessages_internal(n, max.toScala.dilated).asJava
private def receiveMessages_internal(n: Int, max: FiniteDuration): immutable.Seq[M] = {
val stop = max + now
@ -295,7 +295,7 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
max: JDuration,
hint: String,
fisher: java.util.function.Function[M, FishingOutcome]): JList[M] =
fishForMessage_internal(max.asScala.dilated, hint, fisher.apply).asJava
fishForMessage_internal(max.toScala.dilated, hint, fisher.apply).asJava
private def fishForMessage_internal(max: FiniteDuration, hint: String, fisher: M => FishingOutcome): List[M] = {
@tailrec def loop(timeout: FiniteDuration, seen: List[M]): List[M] = {
@ -338,7 +338,7 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
expectTerminated_internal(actorRef, remainingOrDefault)
override def expectTerminated[U](actorRef: ActorRef[U], max: JDuration): Unit =
expectTerminated_internal(actorRef, max.asScala.dilated)
expectTerminated_internal(actorRef, max.toScala.dilated)
private def expectTerminated_internal[U](actorRef: ActorRef[U], max: FiniteDuration): Unit = {
testActor.asInstanceOf[ActorRef[AnyRef]] ! WatchActor(actorRef)
@ -364,7 +364,7 @@ private[pekko] final class TestProbeImpl[M](name: String, system: ActorSystem[_]
awaitAssert_internal(a, remainingOrDefault, 100.millis)
override def awaitAssert[A](max: JDuration, interval: JDuration, creator: Creator[A]): A =
awaitAssert_internal(creator.create(), max.asScala.dilated, interval.asScala)
awaitAssert_internal(creator.create(), max.toScala.dilated, interval.toScala)
def awaitAssert[A](max: JDuration, creator: Creator[A]): A =
awaitAssert(max, JDuration.ofMillis(100), creator)

View file

@ -29,7 +29,7 @@ import pekko.actor.typed.ActorSystem
import pekko.actor.typed.Behavior
import pekko.actor.typed.Props
import pekko.actor.typed.Scheduler
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
object ActorTestKit {
@ -123,7 +123,7 @@ object ActorTestKit {
* no exception is thrown.
*/
def shutdown(system: ActorSystem[_], duration: Duration, throwIfShutdownTimesOut: Boolean): Unit = {
TestKitUtils.shutdown(system, duration.asScala, throwIfShutdownTimesOut)
TestKitUtils.shutdown(system, duration.toScala, throwIfShutdownTimesOut)
}
/**
@ -143,7 +143,7 @@ object ActorTestKit {
*/
def shutdown(system: ActorSystem[_]): Unit = {
val settings = TestKitSettings.create(system)
shutdown(system, settings.DefaultActorSystemShutdownTimeout.asJava, settings.ThrowOnShutdownTimeout)
shutdown(system, settings.DefaultActorSystemShutdownTimeout.toJava, settings.ThrowOnShutdownTimeout)
}
/**
@ -222,7 +222,7 @@ final class ActorTestKit private[pekko] (delegate: pekko.actor.testkit.typed.sca
* It can only be used for actors that were spawned by this `ActorTestKit`.
* Other actors will not be stopped by this method.
*/
def stop[T](ref: ActorRef[T], max: Duration): Unit = delegate.stop(ref, max.asScala)
def stop[T](ref: ActorRef[T], max: Duration): Unit = delegate.stop(ref, max.toScala)
/**
* Shortcut for creating a new test probe for the testkit actor system

View file

@ -17,7 +17,7 @@ import java.time.Duration
import org.apache.pekko
import pekko.actor.typed.{ ActorRef, Behavior, Props }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* Factories for behavior effects for [[BehaviorTestKit]], each effect has a suitable equals and can be used to compare
@ -94,14 +94,14 @@ object Effects {
/**
* The behavior set a new receive timeout, with `message` as timeout notification
*/
def receiveTimeoutSet[T](d: Duration, message: T): ReceiveTimeoutSet[T] = ReceiveTimeoutSet(d.asScala, message)
def receiveTimeoutSet[T](d: Duration, message: T): ReceiveTimeoutSet[T] = ReceiveTimeoutSet(d.toScala, message)
/**
* The behavior used `context.schedule` to schedule `message` to be sent to `target` after `delay`
* FIXME what about events scheduled through the scheduler?
*/
def scheduled[U](delay: Duration, target: ActorRef[U], message: U): Scheduled[U] =
Scheduled(delay.asScala, target, message)
Scheduled(delay.toScala, target, message)
def timerScheduled[U](
key: Any,
@ -110,7 +110,7 @@ object Effects {
mode: TimerScheduled.TimerMode,
overriding: Boolean,
send: pekko.japi.function.Effect): TimerScheduled[U] =
TimerScheduled(key, msg, delay.asScala, mode, overriding)(send.apply _)
TimerScheduled(key, msg, delay.toScala, mode, overriding)(send.apply _)
/**
* Used to represent an empty list of effects - in other words, the behavior didn't do anything observable

View file

@ -22,7 +22,7 @@ import com.typesafe.config.Config
import org.apache.pekko
import pekko.actor.typed.ActorSystem
import pekko.actor.typed.internal.adapter.SchedulerAdapter
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* Manual time allows you to do async tests while controlling the scheduler of the system.
@ -70,11 +70,11 @@ final class ManualTime(delegate: pekko.testkit.ExplicitlyTriggeredScheduler) {
* If you want the amount of time passed to be dilated, apply the dilation before passing the delay to
* this method.
*/
def timePasses(amount: Duration): Unit = delegate.timePasses(amount.asScala)
def timePasses(amount: Duration): Unit = delegate.timePasses(amount.toScala)
@varargs
def expectNoMessageFor(duration: Duration, on: TestProbe[_]*): Unit = {
delegate.timePasses(duration.asScala)
delegate.timePasses(duration.toScala)
on.foreach(_.expectNoMessage(Duration.ZERO))
}

View file

@ -23,11 +23,11 @@ import java.util.function.BiFunction;
import org.apache.pekko.actor.*;
import org.apache.pekko.testkit.PekkoJUnitActorSystemResource;
import org.apache.pekko.testkit.PekkoSpec;
import org.apache.pekko.util.JavaDurationConverters;
import org.junit.ClassRule;
import org.junit.Test;
import org.scalatestplus.junit.JUnitSuite;
import scala.concurrent.Await;
import scala.jdk.javaapi.DurationConverters;
import scala.jdk.javaapi.FutureConverters;
public class CircuitBreakerTest extends JUnitSuite {
@ -51,8 +51,7 @@ public class CircuitBreakerTest extends JUnitSuite {
final CompletionStage<String> res = breaker.callWithCircuitBreakerCS(() -> f);
assertEquals(
"hello",
Await.result(
FutureConverters.asScala(res), JavaDurationConverters.asFiniteDuration(fiveSeconds)));
Await.result(FutureConverters.asScala(res), DurationConverters.toScala(fiveSeconds)));
}
@Test
@ -71,8 +70,7 @@ public class CircuitBreakerTest extends JUnitSuite {
final CompletionStage<String> res = breaker.callWithCircuitBreakerCS(() -> f, fn);
assertEquals(
"hello",
Await.result(
FutureConverters.asScala(res), JavaDurationConverters.asFiniteDuration(fiveSeconds)));
Await.result(FutureConverters.asScala(res), DurationConverters.toScala(fiveSeconds)));
assertEquals(1, breaker.currentFailureCount());
}
}
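
For illustration only (my own sketch, not from the commit): the same call pattern as the test above, written in Scala, showing how a `CompletionStage` result is awaited with a `java.time.Duration` timeout via the `scala.jdk` converters.

```scala
import java.time.{ Duration => JDuration }
import java.util.concurrent.CompletableFuture
import scala.concurrent.Await
import scala.jdk.FutureConverters._
import scala.jdk.javaapi.DurationConverters

object AwaitWithJavaTimeoutSketch extends App {
  val fiveSeconds = JDuration.ofSeconds(5)
  val res = CompletableFuture.completedFuture("hello")

  // CompletionStage -> Future, java.time.Duration -> FiniteDuration, as in the test
  val value = Await.result(res.asScala, DurationConverters.toScala(fiveSeconds))
  assert(value == "hello")
}
```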

View file

@ -21,7 +21,7 @@ import org.slf4j.event.Level
import org.apache.pekko
import pekko.annotation.DoNotInherit
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object SupervisorStrategy {
@ -117,7 +117,7 @@ object SupervisorStrategy {
minBackoff: java.time.Duration,
maxBackoff: java.time.Duration,
randomFactor: Double): BackoffSupervisorStrategy =
restartWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor)
restartWithBackoff(minBackoff.toScala, maxBackoff.toScala, randomFactor)
/**
* INTERNAL API
@ -168,7 +168,7 @@ object SupervisorStrategy {
copy(maxNrOfRetries, withinTimeRange)
override def withLimit(maxNrOfRetries: Int, withinTimeRange: java.time.Duration): RestartSupervisorStrategy =
copy(maxNrOfRetries, withinTimeRange.asScala)
copy(maxNrOfRetries, withinTimeRange.toScala)
override def withStopChildren(enabled: Boolean): RestartSupervisorStrategy =
copy(stopChildren = enabled)
@ -205,9 +205,9 @@ object SupervisorStrategy {
copy(resetBackoffAfter = timeout)
override def withResetBackoffAfter(timeout: java.time.Duration): BackoffSupervisorStrategy =
withResetBackoffAfter(timeout.asScala)
withResetBackoffAfter(timeout.toScala)
override def getResetBackoffAfter: java.time.Duration = resetBackoffAfter.asJava
override def getResetBackoffAfter: java.time.Duration = resetBackoffAfter.toJava
override def withMaxRestarts(maxRestarts: Int): BackoffSupervisorStrategy =
copy(maxRestarts = maxRestarts)

View file

@ -33,7 +33,7 @@ import pekko.actor.typed.scaladsl.Behaviors
import pekko.annotation.ApiMayChange
import pekko.annotation.DoNotInherit
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* `ConsumerController` and [[ProducerController]] or [[WorkPullingProducerController]] are used
@ -214,8 +214,8 @@ object ConsumerController {
def apply(config: Config): Settings = {
new Settings(
flowControlWindow = config.getInt("flow-control-window"),
resendIntervalMin = config.getDuration("resend-interval-min").asScala,
resendIntervalMax = config.getDuration("resend-interval-max").asScala,
resendIntervalMin = config.getDuration("resend-interval-min").toScala,
resendIntervalMax = config.getDuration("resend-interval-max").toScala,
onlyFlowControl = config.getBoolean("only-flow-control"))
}
@ -259,19 +259,19 @@ object ConsumerController {
* Java API
*/
def withResendIntervalMin(newResendIntervalMin: JavaDuration): Settings =
copy(resendIntervalMin = newResendIntervalMin.asScala)
copy(resendIntervalMin = newResendIntervalMin.toScala)
/**
* Java API
*/
def withResendIntervalMax(newResendIntervalMax: JavaDuration): Settings =
copy(resendIntervalMax = newResendIntervalMax.asScala)
copy(resendIntervalMax = newResendIntervalMax.toScala)
/**
* Java API
*/
def getResendIntervalMax(): JavaDuration =
resendIntervalMax.asJava
resendIntervalMax.toJava
def withOnlyFlowControl(newOnlyFlowControl: Boolean): Settings =
copy(onlyFlowControl = newOnlyFlowControl)

View file

@ -32,7 +32,7 @@ import pekko.annotation.ApiMayChange
import pekko.annotation.InternalApi
import pekko.util.Helpers.toRootLowerCase
import pekko.util.Helpers.Requiring
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
/**
@ -171,9 +171,9 @@ object ProducerController {
config.getBytes("chunk-large-messages").requiring(_ <= Int.MaxValue, "Too large chunk-large-messages.").toInt
}
new Settings(
durableQueueRequestTimeout = config.getDuration("durable-queue.request-timeout").asScala,
durableQueueRequestTimeout = config.getDuration("durable-queue.request-timeout").toScala,
durableQueueRetryAttempts = config.getInt("durable-queue.retry-attempts"),
durableQueueResendFirstInterval = config.getDuration("durable-queue.resend-first-interval").asScala,
durableQueueResendFirstInterval = config.getDuration("durable-queue.resend-first-interval").toScala,
chunkLargeMessagesBytes)
}
@ -217,19 +217,19 @@ object ProducerController {
* Java API
*/
def withDurableQueueRequestTimeout(newDurableQueueRequestTimeout: JavaDuration): Settings =
copy(durableQueueRequestTimeout = newDurableQueueRequestTimeout.asScala)
copy(durableQueueRequestTimeout = newDurableQueueRequestTimeout.toScala)
/**
* Java API
*/
def withDurableQueueResendFirstInterval(newDurableQueueResendFirstInterval: JavaDuration): Settings =
copy(durableQueueResendFirstInterval = newDurableQueueResendFirstInterval.asScala)
copy(durableQueueResendFirstInterval = newDurableQueueResendFirstInterval.toScala)
/**
* Java API
*/
def getDurableQueueRequestTimeout(): JavaDuration =
durableQueueRequestTimeout.asJava
durableQueueRequestTimeout.toJava
def withChunkLargeMessagesBytes(newChunkLargeMessagesBytes: Int): Settings =
copy(chunkLargeMessagesBytes = newChunkLargeMessagesBytes)

View file

@ -29,7 +29,7 @@ import pekko.actor.typed.delivery.internal.WorkPullingProducerControllerImpl
import pekko.actor.typed.receptionist.ServiceKey
import pekko.actor.typed.scaladsl.Behaviors
import pekko.annotation.ApiMayChange
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
/**
@ -164,7 +164,7 @@ object WorkPullingProducerController {
def apply(config: Config): Settings = {
new Settings(
bufferSize = config.getInt("buffer-size"),
config.getDuration("internal-ask-timeout").asScala,
config.getDuration("internal-ask-timeout").toScala,
ProducerController.Settings(config))
}
@ -198,7 +198,7 @@ object WorkPullingProducerController {
copy(internalAskTimeout = newInternalAskTimeout)
def withInternalAskTimeout(newInternalAskTimeout: java.time.Duration): Settings =
copy(internalAskTimeout = newInternalAskTimeout.asScala)
copy(internalAskTimeout = newInternalAskTimeout.toScala)
def withProducerControllerSettings(newProducerControllerSettings: ProducerController.Settings): Settings =
copy(producerControllerSettings = newProducerControllerSettings)

View file

@ -31,7 +31,7 @@ import pekko.annotation.InternalApi
import scala.concurrent.ExecutionContext
import pekko.pattern.StatusReply
import pekko.util.BoxedType
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.OptionVal
import pekko.util.Timeout
@ -201,10 +201,10 @@ import scala.util.Success
}
override def setReceiveTimeout(duration: java.time.Duration, msg: T): Unit =
setReceiveTimeout(duration.asScala, msg)
setReceiveTimeout(duration.toScala, msg)
override def scheduleOnce[U](delay: java.time.Duration, target: ActorRef[U], msg: U): pekko.actor.Cancellable =
scheduleOnce(delay.asScala, target, msg)
scheduleOnce(delay.toScala, target, msg)
override def spawn[U](behavior: pekko.actor.typed.Behavior[U], name: String): pekko.actor.typed.ActorRef[U] =
spawn(behavior, name, Props.empty)

View file

@ -64,22 +64,22 @@ import scala.concurrent.duration.FiniteDuration
@InternalApi private[pekko] trait TimerSchedulerCrossDslSupport[T]
extends scaladsl.TimerScheduler[T]
with javadsl.TimerScheduler[T] {
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
override final def startTimerWithFixedDelay(key: Any, msg: T, delay: Duration): Unit =
startTimerWithFixedDelay(key, msg, delay.asScala)
startTimerWithFixedDelay(key, msg, delay.toScala)
override final def startTimerWithFixedDelay(key: Any, msg: T, initialDelay: Duration, delay: Duration): Unit =
startTimerWithFixedDelay(key, msg, initialDelay.asScala, delay.asScala)
startTimerWithFixedDelay(key, msg, initialDelay.toScala, delay.toScala)
override final def startTimerAtFixedRate(key: Any, msg: T, interval: Duration): Unit =
startTimerAtFixedRate(key, msg, interval.asScala)
startTimerAtFixedRate(key, msg, interval.toScala)
override final def startTimerAtFixedRate(key: Any, msg: T, initialDelay: Duration, interval: Duration): Unit =
startTimerAtFixedRate(key, msg, initialDelay.asScala, interval.asScala)
startTimerAtFixedRate(key, msg, initialDelay.toScala, interval.toScala)
override final def startSingleTimer(key: Any, msg: T, delay: Duration): Unit =
startSingleTimer(key, msg, delay.asScala)
startSingleTimer(key, msg, delay.toScala)
}
/**

View file

@ -23,7 +23,7 @@ import pekko.actor.typed.scaladsl.AskPattern._
import pekko.japi.function.{ Function => JFunction }
import pekko.pattern.StatusReply
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* The ask-pattern implements the initiator side of a request-reply protocol.
@ -49,7 +49,7 @@ object AskPattern {
messageFactory: JFunction[ActorRef[Res], Req],
timeout: Duration,
scheduler: Scheduler): CompletionStage[Res] =
actor.ask(messageFactory.apply)(timeout.asScala, scheduler).asJava
actor.ask(messageFactory.apply)(timeout.toScala, scheduler).asJava
/**
* The same as [[ask]] but only for requests that result in a response of type [[pekko.pattern.StatusReply]].
@ -62,6 +62,6 @@ object AskPattern {
messageFactory: JFunction[ActorRef[StatusReply[Res]], Req],
timeout: Duration,
scheduler: Scheduler): CompletionStage[Res] =
actor.askWithStatus(messageFactory.apply)(timeout.asScala, scheduler).asJava
actor.askWithStatus(messageFactory.apply)(timeout.toScala, scheduler).asJava
}

View file

@ -13,9 +13,9 @@
package org.apache.pekko.actor;
import org.apache.pekko.util.JavaDurationConverters;
import scala.concurrent.ExecutionContext;
import scala.concurrent.duration.FiniteDuration;
import scala.jdk.javaapi.DurationConverters;
/**
* An Apache Pekko scheduler service. This one needs one special behavior: if Closeable, it MUST
@ -45,7 +45,7 @@ public abstract class AbstractScheduler extends AbstractSchedulerBase {
*/
public Cancellable scheduleOnce(
final java.time.Duration delay, final Runnable runnable, final ExecutionContext executor) {
return scheduleOnce(JavaDurationConverters.asFiniteDuration(delay), runnable, executor);
return scheduleOnce(DurationConverters.toScala(delay), runnable, executor);
}
/**

View file

@ -20,3 +20,9 @@ ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.util.JavaDu
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.util.JavaDurationConverters#JavaDurationOps.asScala")
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.util.JavaDurationConverters#ScalaDurationOps.asJava$extension")
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.pekko.util.JavaDurationConverters#ScalaDurationOps.asJava")
ProblemFilters.exclude[MissingClassProblem]("org.apache.pekko.util.JavaDurationConverters")
ProblemFilters.exclude[MissingClassProblem]("org.apache.pekko.util.JavaDurationConverters$")
ProblemFilters.exclude[MissingClassProblem]("org.apache.pekko.util.JavaDurationConverters$JavaDurationOps")
ProblemFilters.exclude[MissingClassProblem]("org.apache.pekko.util.JavaDurationConverters$JavaDurationOps$")
ProblemFilters.exclude[MissingClassProblem]("org.apache.pekko.util.JavaDurationConverters$ScalaDurationOps")
ProblemFilters.exclude[MissingClassProblem]("org.apache.pekko.util.JavaDurationConverters$ScalaDurationOps$")

View file

@ -1,35 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* license agreements; and to You under the Apache License, version 2.0:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* This file is part of the Apache Pekko project, which was derived from Akka.
*/
/*
* Copyright (C) 2009-2022 Lightbend Inc. <https://www.lightbend.com>
*/
package org.apache.pekko.util
import java.time.{ Duration => JDuration }
import scala.concurrent.duration.{ Duration, FiniteDuration }
import org.apache.pekko.annotation.InternalStableApi
/**
* INTERNAL API
*/
@InternalStableApi
private[pekko] object JavaDurationConverters {
@inline def asFiniteDuration(duration: JDuration): FiniteDuration = duration.asScala
final implicit class JavaDurationOps(val self: JDuration) extends AnyVal {
@inline def asScala: FiniteDuration = Duration.fromNanos(self.toNanos)
}
final implicit class ScalaDurationOps(val self: Duration) extends AnyVal {
@inline def asJava: JDuration = JDuration.ofNanos(self.toNanos)
}
}
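
A minimal mapping sketch (mine, not part of the commit) of how the removed internal extensions line up with the standard-library converters that replace them, available since Scala 2.13:

```scala
import java.time.{ Duration => JDuration }
import scala.concurrent.duration._
import scala.jdk.DurationConverters._

object JavaDurationConvertersMigrationSketch extends App {
  val j = JDuration.ofMillis(250)
  val s = 250.millis

  // old pekko.util.JavaDurationConverters  -> new scala.jdk equivalents
  // j.asScala / asFiniteDuration(j)        -> j.toScala
  // s.asJava                               -> s.toJava (defined on FiniteDuration)
  assert(j.toScala == s)
  assert(s.toJava == j)
}
```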

View file

@ -1,39 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* license agreements; and to You under the Apache License, version 2.0:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* This file is part of the Apache Pekko project, which was derived from Akka.
*/
/*
* Copyright (C) 2009-2022 Lightbend Inc. <https://www.lightbend.com>
*/
package org.apache.pekko.util
import java.time.{ Duration => JDuration }
import scala.concurrent.duration.{ Duration, FiniteDuration }
import org.apache.pekko.annotation.InternalStableApi
/**
* INTERNAL API
*/
@InternalStableApi
private[pekko] object JavaDurationConverters {
// Ideally this should have the Scala 3 inline keyword but then Java sources are
// unable to call this method, see https://github.com/lampepfl/dotty/issues/19346
def asFiniteDuration(duration: JDuration): FiniteDuration = duration.asScala
final implicit class JavaDurationOps(val self: JDuration) extends AnyVal {
inline def asScala: FiniteDuration = Duration.fromNanos(self.toNanos)
}
final implicit class ScalaDurationOps(val self: Duration) extends AnyVal {
inline def asJava: JDuration = JDuration.ofNanos(self.toNanos)
}
}

View file

@ -17,13 +17,12 @@ import java.util.Optional
import scala.annotation.nowarn
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.Duration
import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.runtime.BoxedUnit
import org.apache.pekko
import pekko.annotation.DoNotInherit
import pekko.japi.pf.ReceiveBuilder
import pekko.util.JavaDurationConverters
/**
* Java API: compatible with lambda expressions
@ -165,8 +164,12 @@ object AbstractActor {
* than the ordinary actor message processing thread, such as [[java.util.concurrent.CompletionStage]] and [[scala.concurrent.Future]] callbacks.
*/
def getReceiveTimeout(): java.time.Duration = {
import JavaDurationConverters._
receiveTimeout.asJava
if (receiveTimeout.isFinite) {
import scala.jdk.DurationConverters._
receiveTimeout.asInstanceOf[FiniteDuration].toJava
} else {
java.time.Duration.ZERO
}
}
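
Worth noting for the change just above: the standard `toJava` extension is defined only on `FiniteDuration`, while an actor's receive timeout is a plain `Duration` that can be `Duration.Undefined`, hence the new `isFinite` guard. A standalone sketch of the same guard (the helper name `toJavaOrZero` is mine):

```scala
import java.time.{ Duration => JDuration }
import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.jdk.DurationConverters._

object ReceiveTimeoutConversionSketch extends App {
  // .toJava exists only for FiniteDuration, so infinite/undefined values need explicit handling.
  def toJavaOrZero(d: Duration): JDuration = d match {
    case f: FiniteDuration => f.toJava
    case _                 => JDuration.ZERO // same fallback as chosen in the diff above
  }

  assert(toJavaOrZero(Duration.Undefined) == JDuration.ZERO)
  assert(toJavaOrZero(Duration.fromNanos(1000)) == JDuration.ofNanos(1000))
}
```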
/**
@ -189,8 +192,8 @@ object AbstractActor {
* than the ordinary actor message processing thread, such as [[java.util.concurrent.CompletionStage]] and [[scala.concurrent.Future]] callbacks.
*/
def setReceiveTimeout(timeout: java.time.Duration): Unit = {
import JavaDurationConverters._
setReceiveTimeout(timeout.asScala)
import scala.jdk.DurationConverters._
setReceiveTimeout(timeout.toScala)
}
/**

View file

@ -17,7 +17,7 @@ import scala.concurrent.duration.FiniteDuration
import org.apache.pekko
import org.apache.pekko.japi.function.{ Effect, Function2, Predicate, Predicate2, Procedure, Procedure2, Procedure3 }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* Java API: compatible with lambda expressions
@ -123,7 +123,7 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
stateName: S,
stateTimeout: java.time.Duration,
stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = {
when(stateName, stateTimeout.asScala, stateFunctionBuilder)
when(stateName, stateTimeout.toScala, stateFunctionBuilder)
}
/**
@ -159,7 +159,7 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param timeout state timeout for the initial state, overriding the default timeout for that state
*/
final def startWith(stateName: S, stateData: D, timeout: java.time.Duration): Unit = {
startWith(stateName, stateData, timeout.asScala)
startWith(stateName, stateData, timeout.toScala)
}
/**
@ -461,7 +461,7 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* in the mailbox when the new timer was started.
*/
def startTimerWithFixedDelay(name: String, msg: Any, delay: java.time.Duration): Unit =
startTimerWithFixedDelay(name, msg, delay.asScala)
startTimerWithFixedDelay(name, msg, delay.toScala)
/**
* Schedules a message to be sent repeatedly to the `self` actor with a
@ -489,7 +489,7 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* in the mailbox when the new timer was started.
*/
def startTimerAtFixedRate(name: String, msg: Any, interval: java.time.Duration): Unit =
startTimerAtFixedRate(name, msg, interval.asScala)
startTimerAtFixedRate(name, msg, interval.toScala)
/**
* Start a timer that will send `msg` once to the `self` actor after
@ -501,7 +501,7 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* in the mailbox when the new timer was started.
*/
def startSingleTimer(name: String, msg: Any, delay: java.time.Duration): Unit =
startSingleTimer(name, msg, delay.asScala)
startSingleTimer(name, msg, delay.toScala)
/**
* Default reason if calling `stop()`.

View file

@ -30,7 +30,7 @@ import org.apache.pekko
import scala.concurrent.ExecutionContext
import pekko.pattern.ask
import pekko.routing.MurmurHash
import pekko.util.{ Helpers, JavaDurationConverters, Timeout }
import pekko.util.{ Helpers, Timeout }
/**
* An ActorSelection is a logical view of a section of an ActorSystem's tree of Actors,
@ -106,8 +106,8 @@ abstract class ActorSelection extends Serializable {
*/
def resolveOne(timeout: java.time.Duration): CompletionStage[ActorRef] = {
import scala.jdk.FutureConverters._
import JavaDurationConverters._
resolveOne(timeout.asScala).asJava
import scala.jdk.DurationConverters._
resolveOne(timeout.toScala).asJava
}
override def toString: String = {

View file

@ -24,7 +24,7 @@ import language.implicitConversions
import org.apache.pekko
import pekko.annotation.InternalApi
import pekko.routing.{ Deafen, Listen, Listeners }
import pekko.util.{ unused, JavaDurationConverters }
import pekko.util.unused
object FSM {
@ -293,8 +293,8 @@ object FSM {
* Use Duration.Inf to deactivate an existing timeout.
*/
def forMax(timeout: java.time.Duration): State[S, D] = {
import JavaDurationConverters._
forMax(timeout.asScala)
import scala.jdk.DurationConverters._
forMax(timeout.toScala)
}
/**

View file

@ -18,7 +18,7 @@ import pekko.annotation.InternalApi
import pekko.event.Logging
import pekko.event.Logging.{ Error, LogEvent, LogLevel }
import pekko.japi.Util.immutableSeq
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import java.lang.reflect.InvocationTargetException
import java.lang.{ Iterable => JIterable }
@ -487,7 +487,7 @@ case class AllForOneStrategy(
withinTimeRange: java.time.Duration,
decider: SupervisorStrategy.JDecider,
loggingEnabled: Boolean) =
this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
this(maxNrOfRetries, withinTimeRange.toScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
/**
* Java API
@ -499,7 +499,7 @@ case class AllForOneStrategy(
* Java API
*/
def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.JDecider) =
this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(decider))
this(maxNrOfRetries, withinTimeRange.toScala)(SupervisorStrategy.makeDecider(decider))
/**
* Java API
@ -511,7 +511,7 @@ case class AllForOneStrategy(
* Java API
*/
def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, trapExit: JIterable[Class[_ <: Throwable]]) =
this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(trapExit))
this(maxNrOfRetries, withinTimeRange.toScala)(SupervisorStrategy.makeDecider(trapExit))
/**
* Java API: compatible with lambda expressions
@ -523,7 +523,7 @@ case class AllForOneStrategy(
* Java API: compatible with lambda expressions
*/
def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.Decider) =
this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange.asScala)(decider)
this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange.toScala)(decider)
/**
* Java API: compatible with lambda expressions
@ -599,7 +599,7 @@ case class OneForOneStrategy(
withinTimeRange: java.time.Duration,
decider: SupervisorStrategy.JDecider,
loggingEnabled: Boolean) =
this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
this(maxNrOfRetries, withinTimeRange.toScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
/**
* Java API
@ -611,7 +611,7 @@ case class OneForOneStrategy(
* Java API
*/
def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.JDecider) =
this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(decider))
this(maxNrOfRetries, withinTimeRange.toScala)(SupervisorStrategy.makeDecider(decider))
/**
* Java API
@ -623,7 +623,7 @@ case class OneForOneStrategy(
* Java API
*/
def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, trapExit: JIterable[Class[_ <: Throwable]]) =
this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(trapExit))
this(maxNrOfRetries, withinTimeRange.toScala)(SupervisorStrategy.makeDecider(trapExit))
/**
* Java API: compatible with lambda expressions
@ -635,7 +635,7 @@ case class OneForOneStrategy(
* Java API: compatible with lambda expressions
*/
def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.Decider) =
this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange.asScala)(decider)
this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange.toScala)(decider)
def this(loggingEnabled: Boolean, decider: SupervisorStrategy.Decider) =
this(loggingEnabled = loggingEnabled)(decider)

View file

@ -23,7 +23,6 @@ import scala.util.control.NoStackTrace
import org.apache.pekko
import pekko.actor.Scheduler.AtomicCancellable
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters
/**
* This exception is thrown by Scheduler.schedule* when scheduling is not
@ -130,8 +129,8 @@ trait Scheduler {
delay: java.time.Duration,
runnable: Runnable,
executor: ExecutionContext): Cancellable = {
import JavaDurationConverters._
scheduleWithFixedDelay(initialDelay.asScala, delay.asScala)(runnable)(executor)
import scala.jdk.DurationConverters._
scheduleWithFixedDelay(initialDelay.toScala, delay.toScala)(runnable)(executor)
}
/**
@ -188,8 +187,8 @@ trait Scheduler {
message: Any,
executor: ExecutionContext,
sender: ActorRef): Cancellable = {
import JavaDurationConverters._
scheduleWithFixedDelay(initialDelay.asScala, delay.asScala, receiver, message)(executor, sender)
import scala.jdk.DurationConverters._
scheduleWithFixedDelay(initialDelay.toScala, delay.toScala, receiver, message)(executor, sender)
}
/**
@ -263,8 +262,8 @@ trait Scheduler {
interval: java.time.Duration,
runnable: Runnable,
executor: ExecutionContext): Cancellable = {
import JavaDurationConverters._
scheduleAtFixedRate(initialDelay.asScala, interval.asScala)(runnable)(executor)
import scala.jdk.DurationConverters._
scheduleAtFixedRate(initialDelay.toScala, interval.toScala)(runnable)(executor)
}
/**
@ -341,8 +340,8 @@ trait Scheduler {
message: Any,
executor: ExecutionContext,
sender: ActorRef): Cancellable = {
import JavaDurationConverters._
scheduleAtFixedRate(initialDelay.asScala, interval.asScala, receiver, message)(executor, sender)
import scala.jdk.DurationConverters._
scheduleAtFixedRate(initialDelay.toScala, interval.toScala, receiver, message)(executor, sender)
}
protected def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(
@ -381,8 +380,8 @@ trait Scheduler {
message: Any,
executor: ExecutionContext,
sender: ActorRef): Cancellable = {
import JavaDurationConverters._
scheduleOnce(delay.asScala, receiver, message)(executor, sender)
import scala.jdk.DurationConverters._
scheduleOnce(delay.toScala, receiver, message)(executor, sender)
}
/**
@ -420,8 +419,8 @@ trait Scheduler {
* Note: For scheduling within actors `AbstractActorWithTimers` should be preferred.
*/
def scheduleOnce(delay: java.time.Duration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = {
import JavaDurationConverters._
scheduleOnce(delay.asScala, runnable)(executor)
import scala.jdk.DurationConverters._
scheduleOnce(delay.toScala, runnable)(executor)
}
/**

View file

@ -18,7 +18,7 @@ import scala.concurrent.duration.FiniteDuration
import org.apache.pekko
import pekko.annotation.DoNotInherit
import pekko.dispatch.Envelope
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.OptionVal
/**
@ -165,7 +165,7 @@ abstract class UntypedAbstractActorWithTimers extends UntypedAbstractActor with
* in the mailbox when the new timer was started.
*/
final def startTimerWithFixedDelay(key: Any, msg: Any, delay: java.time.Duration): Unit =
startTimerWithFixedDelay(key, msg, delay.asScala)
startTimerWithFixedDelay(key, msg, delay.toScala)
/**
* Java API: Schedules a message to be sent repeatedly to the `self` actor with a
@ -188,7 +188,7 @@ abstract class UntypedAbstractActorWithTimers extends UntypedAbstractActor with
msg: Any,
initialDelay: java.time.Duration,
delay: java.time.Duration): Unit =
startTimerWithFixedDelay(key, msg, initialDelay.asScala, delay.asScala)
startTimerWithFixedDelay(key, msg, initialDelay.toScala, delay.toScala)
/**
* Scala API: Schedules a message to be sent repeatedly to the `self` actor with a
@ -270,7 +270,7 @@ abstract class UntypedAbstractActorWithTimers extends UntypedAbstractActor with
* in the mailbox when the new timer was started.
*/
final def startTimerAtFixedRate(key: Any, msg: Any, interval: java.time.Duration): Unit =
startTimerAtFixedRate(key, msg, interval.asScala)
startTimerAtFixedRate(key, msg, interval.toScala)
/**
* Java API: Schedules a message to be sent repeatedly to the `self` actor with a
@ -302,7 +302,7 @@ abstract class UntypedAbstractActorWithTimers extends UntypedAbstractActor with
msg: Any,
initialDelay: java.time.Duration,
interval: java.time.Duration): Unit =
startTimerAtFixedRate(key, msg, initialDelay.asScala, interval.asScala)
startTimerAtFixedRate(key, msg, initialDelay.toScala, interval.toScala)
/**
* Start a timer that will send `msg` once to the `self` actor after
@ -325,7 +325,7 @@ abstract class UntypedAbstractActorWithTimers extends UntypedAbstractActor with
* in the mailbox when the new timer was started.
*/
final def startSingleTimer(key: Any, msg: Any, timeout: java.time.Duration): Unit =
startSingleTimer(key, msg, timeout.asScala)
startSingleTimer(key, msg, timeout.toScala)
/**
* Check if a timer with a given `key` is active.

View file

@ -30,7 +30,7 @@ import pekko.annotation.InternalApi
import pekko.io.Inet._
import pekko.util.{ ByteString, Helpers }
import pekko.util.Helpers.Requiring
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.ccompat.JavaConverters._
/**
@ -731,7 +731,7 @@ object TcpMessage {
localAddress: InetSocketAddress,
options: JIterable[SocketOption],
timeout: java.time.Duration,
pullMode: Boolean): Command = connect(remoteAddress, localAddress, options, timeout.asScala, pullMode)
pullMode: Boolean): Command = connect(remoteAddress, localAddress, options, timeout.toScala, pullMode)
/**
* Connect to the given `remoteAddress` without binding to a local address and without

View file

@ -17,7 +17,7 @@ import scala.concurrent.duration.{ Duration, FiniteDuration, _ }
import org.apache.pekko
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object CachePolicy {
@ -28,8 +28,8 @@ object CachePolicy {
final class Ttl private (val value: FiniteDuration) extends CachePolicy {
if (value < Duration.Zero)
throw new IllegalArgumentException(s"TTL values must be a positive value (zero included).")
import pekko.util.JavaDurationConverters._
def getValue: java.time.Duration = value.asJava
import scala.jdk.DurationConverters._
def getValue: java.time.Duration = value.toJava
override def equals(other: Any): Boolean = other match {
case that: Ttl => value == that.value
@ -49,7 +49,7 @@ object CachePolicy {
s"Positive TTL values must be a strictly positive value. Use Ttl.never for zero.")
new Ttl(value)
}
def fromPositive(value: java.time.Duration): Ttl = fromPositive(value.asScala)
def fromPositive(value: java.time.Duration): Ttl = fromPositive(value.toScala)
/**
* INTERNAL API

View file

@ -48,7 +48,7 @@ import pekko.io.dns.IdGenerator.Policy
import pekko.io.dns.internal.{ ResolvConf, ResolvConfParser }
import pekko.util.Helpers
import pekko.util.Helpers.Requiring
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.ccompat.JavaConverters._
/** INTERNAL API */
@ -77,7 +77,7 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) {
}
}
val ResolveTimeout: FiniteDuration = c.getDuration("resolve-timeout").asScala
val ResolveTimeout: FiniteDuration = c.getDuration("resolve-timeout").toScala
val PositiveCachePolicy: CachePolicy = getTtl("positive-ttl")
val NegativeCachePolicy: CachePolicy = getTtl("negative-ttl")

View file

@ -17,7 +17,7 @@ import org.apache.pekko
import pekko.actor.{ ActorRef, OneForOneStrategy, Props, SupervisorStrategy }
import pekko.annotation.{ DoNotInherit, InternalApi }
import pekko.pattern.internal.{ BackoffOnRestartSupervisor, BackoffOnStopSupervisor }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.concurrent.duration.{ Duration, FiniteDuration }
@ -132,7 +132,7 @@ object BackoffOpts {
minBackoff: java.time.Duration,
maxBackoff: java.time.Duration,
randomFactor: Double): BackoffOnFailureOptions =
onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor)
onFailure(childProps, childName, minBackoff.toScala, maxBackoff.toScala, randomFactor)
/**
* Back-off options for creating a back-off supervisor actor that expects a child actor to stop on failure.
@ -254,7 +254,7 @@ object BackoffOpts {
minBackoff: java.time.Duration,
maxBackoff: java.time.Duration,
randomFactor: Double): BackoffOnStopOptions =
onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor)
onStop(childProps, childName, minBackoff.toScala, maxBackoff.toScala, randomFactor)
}

View file

@ -32,7 +32,7 @@ import scala.concurrent.ExecutionContext.parasitic
import pekko.pattern.internal.{ CircuitBreakerNoopTelemetry, CircuitBreakerTelemetry }
import pekko.annotation.InternalApi
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread
@ -84,7 +84,7 @@ object CircuitBreaker {
maxFailures: Int,
callTimeout: java.time.Duration,
resetTimeout: java.time.Duration): CircuitBreaker =
apply(scheduler, maxFailures, callTimeout.asScala, resetTimeout.asScala)
apply(scheduler, maxFailures, callTimeout.toScala, resetTimeout.toScala)
/**
* Java API: Lookup a CircuitBreaker in registry.
@ -172,8 +172,8 @@ class CircuitBreaker(
this(
scheduler,
maxFailures,
callTimeout.asScala,
resetTimeout.asScala,
callTimeout.toScala,
resetTimeout.toScala,
maxResetTimeout = 36500.days,
exponentialBackoffFactor = 1.0,
randomFactor = 0.0)(executor)
@ -232,7 +232,7 @@ class CircuitBreaker(
* @param maxResetTimeout the upper bound of resetTimeout
*/
def withExponentialBackoff(maxResetTimeout: java.time.Duration): CircuitBreaker = {
withExponentialBackoff(maxResetTimeout.asScala)
withExponentialBackoff(maxResetTimeout.toScala)
}
/**

View file

@ -22,7 +22,7 @@ import scala.concurrent.ExecutionContext
import org.apache.pekko
import pekko.actor.{ ActorSelection, ClassicActorSystemProvider, Scheduler }
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.annotation.nowarn
@ -107,7 +107,7 @@ object Patterns {
* }}}
*/
def ask(actor: ActorRef, message: Any, timeout: java.time.Duration): CompletionStage[AnyRef] =
scalaAsk(actor, message)(timeout.asScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
scalaAsk(actor, message)(timeout.toScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
/**
* Use for messages whose response is known to be a [[pekko.pattern.StatusReply]]. When a [[pekko.pattern.StatusReply#success]] response
@ -115,7 +115,7 @@ object Patterns {
* failed.
*/
def askWithStatus(actor: ActorRef, message: Any, timeout: java.time.Duration): CompletionStage[AnyRef] =
scalaAskWithStatus(actor, message)(timeout.asScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
scalaAskWithStatus(actor, message)(timeout.toScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
/**
* A variation of ask which allows to implement "replyTo" pattern by including
@ -266,7 +266,7 @@ object Patterns {
* }}}
*/
def ask(selection: ActorSelection, message: Any, timeout: java.time.Duration): CompletionStage[AnyRef] =
scalaAsk(selection, message)(timeout.asScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
scalaAsk(selection, message)(timeout.toScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
/**
* <i>Java API for `org.apache.pekko.pattern.ask`:</i>
@ -334,7 +334,7 @@ object Patterns {
selection: ActorSelection,
messageFactory: japi.function.Function[ActorRef, Any],
timeout: java.time.Duration): CompletionStage[AnyRef] =
extended.ask(selection, messageFactory.apply _)(timeout.asScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
extended.ask(selection, messageFactory.apply _)(timeout.toScala).asJava.asInstanceOf[CompletionStage[AnyRef]]
/**
* Register an onComplete callback on this [[scala.concurrent.Future]] to send
@ -399,7 +399,7 @@ object Patterns {
* is completed with failure [[pekko.pattern.AskTimeoutException]].
*/
def gracefulStop(target: ActorRef, timeout: java.time.Duration): CompletionStage[java.lang.Boolean] =
scalaGracefulStop(target, timeout.asScala).asJava.asInstanceOf[CompletionStage[java.lang.Boolean]]
scalaGracefulStop(target, timeout.toScala).asJava.asInstanceOf[CompletionStage[java.lang.Boolean]]
/**
* Returns a [[scala.concurrent.Future]] that will be completed with success (value `true`) when
@ -434,7 +434,7 @@ object Patterns {
target: ActorRef,
timeout: java.time.Duration,
stopMessage: Any): CompletionStage[java.lang.Boolean] =
scalaGracefulStop(target, timeout.asScala, stopMessage).asJava.asInstanceOf[CompletionStage[java.lang.Boolean]]
scalaGracefulStop(target, timeout.toScala, stopMessage).asJava.asInstanceOf[CompletionStage[java.lang.Boolean]]
/**
* Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided Callable
@ -628,7 +628,7 @@ object Patterns {
require(attempt != null, "Parameter attempt should not be null.")
require(minBackoff != null, "Parameter minBackoff should not be null.")
require(maxBackoff != null, "Parameter minBackoff should not be null.")
scalaRetry(() => attempt.call().asScala, attempts, minBackoff.asScala, maxBackoff.asScala, randomFactor)(
scalaRetry(() => attempt.call().asScala, attempts, minBackoff.toScala, maxBackoff.toScala, randomFactor)(
ec,
scheduler).asJava
}
@ -675,7 +675,7 @@ object Patterns {
scalaRetry(
() => attempt.call().asScala,
(t, e) => shouldRetry.test(t, e),
attempts, minBackoff.asScala, maxBackoff.asScala, randomFactor)(
attempts, minBackoff.toScala, maxBackoff.toScala, randomFactor)(
ec,
scheduler).asJava
}
@ -760,7 +760,7 @@ object Patterns {
scheduler: Scheduler,
ec: ExecutionContext): CompletionStage[T] = {
require(attempt != null, "Parameter attempt should not be null.")
scalaRetry(() => attempt.call().asScala, attempts, delay.asScala)(ec, scheduler).asJava
scalaRetry(() => attempt.call().asScala, attempts, delay.toScala)(ec, scheduler).asJava
}
/**
@ -793,7 +793,7 @@ object Patterns {
scheduler: Scheduler,
ec: ExecutionContext): CompletionStage[T] = {
require(attempt != null, "Parameter attempt should not be null.")
scalaRetry(() => attempt.call().asScala, (t, e) => shouldRetry.test(t, e), attempts, delay.asScala)(ec,
scalaRetry(() => attempt.call().asScala, (t, e) => shouldRetry.test(t, e), attempts, delay.toScala)(ec,
scheduler).asJava
}
@ -821,7 +821,7 @@ object Patterns {
scalaRetry(
() => attempt.call().asScala,
attempts,
attempted => delayFunction.apply(attempted).toScala.map(_.asScala))(context, scheduler).asJava
attempted => delayFunction.apply(attempted).toScala.map(_.toScala))(context, scheduler).asJava
}
/**
@ -863,6 +863,6 @@ object Patterns {
() => attempt.call().asScala,
(t, e) => shouldRetry.test(t, e),
attempts,
attempted => delayFunction.apply(attempted).toScala.map(_.asScala))(context, scheduler).asJava
attempted => delayFunction.apply(attempted).toScala.map(_.toScala))(context, scheduler).asJava
}
}
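
One detail in the retry hunks above that is easy to misread: the two `.toScala` calls on a single line come from different converters. Assuming the delay function yields `Optional[java.time.Duration]`, the first converts the `Optional`, the second converts the duration inside it. A small sketch (names are mine):

```scala
import java.time.{ Duration => JDuration }
import java.util.Optional
import scala.concurrent.duration._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._

object OptionalDelaySketch extends App {
  val delay: Optional[JDuration] = Optional.of(JDuration.ofMillis(100))

  // first .toScala:  Optional  -> Option          (scala.jdk.OptionConverters)
  // second .toScala: JDuration -> FiniteDuration  (scala.jdk.DurationConverters)
  val scalaDelay: Option[FiniteDuration] = delay.toScala.map(_.toScala)

  assert(scalaDelay.contains(100.millis))
}
```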

View file

@ -25,7 +25,7 @@ import com.typesafe.config.Config
import org.apache.pekko
import pekko.actor._
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
trait OptimalSizeExploringResizer extends Resizer {
@ -71,8 +71,8 @@ case object OptimalSizeExploringResizer {
lowerBound = resizerCfg.getInt("lower-bound"),
upperBound = resizerCfg.getInt("upper-bound"),
chanceOfScalingDownWhenFull = resizerCfg.getDouble("chance-of-ramping-down-when-full"),
actionInterval = resizerCfg.getDuration("action-interval").asScala,
downsizeAfterUnderutilizedFor = resizerCfg.getDuration("downsize-after-underutilized-for").asScala,
actionInterval = resizerCfg.getDuration("action-interval").toScala,
downsizeAfterUnderutilizedFor = resizerCfg.getDuration("downsize-after-underutilized-for").toScala,
numOfAdjacentSizesToConsiderDuringOptimization = resizerCfg.getInt("optimization-range"),
exploreStepSize = resizerCfg.getDouble("explore-step-size"),
explorationProbability = resizerCfg.getDouble("chance-of-exploration"),
@ -269,7 +269,7 @@ case class DefaultOptimalSizeExploringResizer(
val now = LocalDateTime.now
val proposedChange =
if (record.underutilizationStreak.fold(false)(
_.start.isBefore(now.minus(downsizeAfterUnderutilizedFor.asJava)))) {
_.start.isBefore(now.minus(downsizeAfterUnderutilizedFor.asInstanceOf[FiniteDuration].toJava)))) {
val downsizeTo = (record.underutilizationStreak.get.highestUtilization * downsizeRatio).toInt
Math.min(downsizeTo - currentSize, 0)
} else if (performanceLog.isEmpty || record.underutilizationStreak.isDefined) {

View file

@ -32,7 +32,7 @@ import pekko.japi.Util.immutableSeq
import pekko.pattern.ask
import pekko.pattern.pipe
import pekko.util.Helpers.ConfigOps
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
/**
@ -142,7 +142,7 @@ final case class ScatterGatherFirstCompletedPool(
* @param within expecting at least one reply within this duration, otherwise
* it will reply with [[pekko.pattern.AskTimeoutException]] in a [[pekko.actor.Status.Failure]]
*/
def this(nr: Int, within: java.time.Duration) = this(nr, within.asScala)
def this(nr: Int, within: java.time.Duration) = this(nr, within.toScala)
override def createRouter(system: ActorSystem): Router = new Router(ScatterGatherFirstCompletedRoutingLogic(within))
@ -218,7 +218,7 @@ final case class ScatterGatherFirstCompletedGroup(
* it will reply with [[pekko.pattern.AskTimeoutException]] in a [[pekko.actor.Status.Failure]]
*/
def this(routeePaths: java.lang.Iterable[String], within: java.time.Duration) =
this(immutableSeq(routeePaths), within.asScala)
this(immutableSeq(routeePaths), within.toScala)
override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths

View file

@ -28,7 +28,7 @@ import pekko.dispatch.Dispatchers
import pekko.japi.Util.immutableSeq
import pekko.pattern.{ ask, pipe, AskTimeoutException }
import pekko.util.Helpers.ConfigOps
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
/**
@ -197,7 +197,7 @@ final case class TailChoppingPool(
* @param interval duration after which next routee will be picked
*/
def this(nr: Int, within: java.time.Duration, interval: java.time.Duration) =
this(nr, within.asScala, interval.asScala)
this(nr, within.toScala, interval.toScala)
override def createRouter(system: ActorSystem): Router =
new Router(
@ -291,7 +291,7 @@ final case class TailChoppingGroup(
* @param interval duration after which next routee will be picked
*/
def this(routeePaths: java.lang.Iterable[String], within: java.time.Duration, interval: java.time.Duration) =
this(immutableSeq(routeePaths), within.asScala, interval.asScala)
this(immutableSeq(routeePaths), within.toScala, interval.toScala)
override def createRouter(system: ActorSystem): Router =
new Router(

View file

@ -47,8 +47,8 @@ object Timeout {
* Create a Timeout from java.time.Duration.
*/
def create(duration: java.time.Duration): Timeout = {
import JavaDurationConverters._
new Timeout(duration.asScala)
import scala.jdk.DurationConverters._
new Timeout(duration.toScala)
}
implicit def durationToTimeout(duration: FiniteDuration): Timeout = new Timeout(duration)

View file

@ -20,7 +20,7 @@ import pekko.actor.typed.ActorRef
import pekko.cluster.sharding.ShardRegion.ClusterShardingStats
import pekko.cluster.sharding.ShardRegion.CurrentShardRegionState
import pekko.cluster.sharding.typed.scaladsl.EntityTypeKey
import pekko.util.JavaDurationConverters
import scala.jdk.DurationConverters._
/**
* Protocol for querying sharding state e.g. A ShardRegion's state
@ -77,5 +77,5 @@ final case class GetClusterShardingStats(
entityTypeKey: javadsl.EntityTypeKey[_],
timeout: java.time.Duration,
replyTo: ActorRef[ClusterShardingStats]) =
this(entityTypeKey.asScala, JavaDurationConverters.asFiniteDuration(timeout), replyTo)
this(entityTypeKey.asScala, timeout.toScala, replyTo)
}

View file

@ -28,7 +28,7 @@ import pekko.cluster.typed.Cluster
import pekko.cluster.typed.ClusterSingletonManagerSettings
import pekko.coordination.lease.LeaseUsageSettings
import pekko.japi.Util.immutableSeq
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object ClusterShardingSettings {
@ -315,11 +315,11 @@ object ClusterShardingSettings {
def withTimeout(timeout: FiniteDuration): IdleSettings = copy(timeout = timeout)
def withTimeout(timeout: java.time.Duration): IdleSettings = withTimeout(timeout.asScala)
def withTimeout(timeout: java.time.Duration): IdleSettings = withTimeout(timeout.toScala)
def withInterval(interval: FiniteDuration): IdleSettings = copy(interval = Some(interval))
def withInterval(interval: java.time.Duration): IdleSettings = withInterval(interval.asScala)
def withInterval(interval: java.time.Duration): IdleSettings = withInterval(interval.toScala)
private def copy(timeout: FiniteDuration = timeout, interval: Option[FiniteDuration] = interval): IdleSettings =
new IdleSettings(timeout, interval)
@ -708,37 +708,37 @@ object ClusterShardingSettings {
def withBufferSize(value: Int): TuningParameters = copy(bufferSize = value)
def withCoordinatorFailureBackoff(value: FiniteDuration): TuningParameters = copy(coordinatorFailureBackoff = value)
def withCoordinatorFailureBackoff(value: java.time.Duration): TuningParameters =
withCoordinatorFailureBackoff(value.asScala)
withCoordinatorFailureBackoff(value.toScala)
def withEntityRecoveryConstantRateStrategyFrequency(value: FiniteDuration): TuningParameters =
copy(entityRecoveryConstantRateStrategyFrequency = value)
def withEntityRecoveryConstantRateStrategyFrequency(value: java.time.Duration): TuningParameters =
withEntityRecoveryConstantRateStrategyFrequency(value.asScala)
withEntityRecoveryConstantRateStrategyFrequency(value.toScala)
def withEntityRecoveryConstantRateStrategyNumberOfEntities(value: Int): TuningParameters =
copy(entityRecoveryConstantRateStrategyNumberOfEntities = value)
def withEntityRecoveryStrategy(value: java.lang.String): TuningParameters = copy(entityRecoveryStrategy = value)
def withEntityRestartBackoff(value: FiniteDuration): TuningParameters = copy(entityRestartBackoff = value)
def withEntityRestartBackoff(value: java.time.Duration): TuningParameters = withEntityRestartBackoff(value.asScala)
def withEntityRestartBackoff(value: java.time.Duration): TuningParameters = withEntityRestartBackoff(value.toScala)
def withHandOffTimeout(value: FiniteDuration): TuningParameters = copy(handOffTimeout = value)
def withHandOffTimeout(value: java.time.Duration): TuningParameters = withHandOffTimeout(value.asScala)
def withHandOffTimeout(value: java.time.Duration): TuningParameters = withHandOffTimeout(value.toScala)
def withKeepNrOfBatches(value: Int): TuningParameters = copy(keepNrOfBatches = value)
def withLeastShardAllocationMaxSimultaneousRebalance(value: Int): TuningParameters =
copy(leastShardAllocationMaxSimultaneousRebalance = value)
def withLeastShardAllocationRebalanceThreshold(value: Int): TuningParameters =
copy(leastShardAllocationRebalanceThreshold = value)
def withRebalanceInterval(value: FiniteDuration): TuningParameters = copy(rebalanceInterval = value)
def withRebalanceInterval(value: java.time.Duration): TuningParameters = withRebalanceInterval(value.asScala)
def withRebalanceInterval(value: java.time.Duration): TuningParameters = withRebalanceInterval(value.toScala)
def withRetryInterval(value: FiniteDuration): TuningParameters = copy(retryInterval = value)
def withRetryInterval(value: java.time.Duration): TuningParameters = withRetryInterval(value.asScala)
def withRetryInterval(value: java.time.Duration): TuningParameters = withRetryInterval(value.toScala)
def withShardFailureBackoff(value: FiniteDuration): TuningParameters = copy(shardFailureBackoff = value)
def withShardFailureBackoff(value: java.time.Duration): TuningParameters = withShardFailureBackoff(value.asScala)
def withShardFailureBackoff(value: java.time.Duration): TuningParameters = withShardFailureBackoff(value.toScala)
def withShardStartTimeout(value: FiniteDuration): TuningParameters = copy(shardStartTimeout = value)
def withShardStartTimeout(value: java.time.Duration): TuningParameters = withShardStartTimeout(value.asScala)
def withShardStartTimeout(value: java.time.Duration): TuningParameters = withShardStartTimeout(value.toScala)
def withSnapshotAfter(value: Int): TuningParameters = copy(snapshotAfter = value)
def withUpdatingStateTimeout(value: FiniteDuration): TuningParameters = copy(updatingStateTimeout = value)
def withUpdatingStateTimeout(value: java.time.Duration): TuningParameters = withUpdatingStateTimeout(value.asScala)
def withUpdatingStateTimeout(value: java.time.Duration): TuningParameters = withUpdatingStateTimeout(value.toScala)
def withWaitingForStateTimeout(value: FiniteDuration): TuningParameters = copy(waitingForStateTimeout = value)
def withWaitingForStateTimeout(value: java.time.Duration): TuningParameters =
withWaitingForStateTimeout(value.asScala)
withWaitingForStateTimeout(value.toScala)
def withCoordinatorStateWriteMajorityPlus(value: Int): TuningParameters =
copy(coordinatorStateWriteMajorityPlus = value)
def withCoordinatorStateReadMajorityPlus(value: Int): TuningParameters =
@ -887,7 +887,7 @@ final class ClusterShardingSettings(
copy(shardRegionQueryTimeout = duration)
def withShardRegionQueryTimeout(duration: java.time.Duration): ClusterShardingSettings =
copy(shardRegionQueryTimeout = duration.asScala)
copy(shardRegionQueryTimeout = duration.toScala)
def withLeaseSettings(leaseSettings: LeaseUsageSettings) = copy(leaseSettings = Option(leaseSettings))

View file

@ -22,7 +22,7 @@ import com.typesafe.config.Config
import org.apache.pekko
import pekko.actor.typed.ActorSystem
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object ShardedDaemonProcessSettings {
@ -39,7 +39,7 @@ object ShardedDaemonProcessSettings {
* Load settings from a specific config location.
*/
def fromConfig(config: Config): ShardedDaemonProcessSettings = {
val keepAliveInterval = config.getDuration("keep-alive-interval").asScala
val keepAliveInterval = config.getDuration("keep-alive-interval").toScala
new ShardedDaemonProcessSettings(keepAliveInterval, None, None)
}
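
Several of the settings classes touched here read durations with Typesafe Config's `getDuration`, which returns a `java.time.Duration`; `.toScala` then yields the `FiniteDuration` the Scala-side settings store, as `fromConfig` above does. A small standalone sketch under that assumption (the config key is illustrative only):

```scala
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.FiniteDuration
import scala.jdk.DurationConverters._

val config = ConfigFactory.parseString("keep-alive-interval = 10s")

// Config#getDuration returns java.time.Duration; toScala converts it for the Scala settings class
val keepAliveInterval: FiniteDuration = config.getDuration("keep-alive-interval").toScala
```
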
@ -68,7 +68,7 @@ final class ShardedDaemonProcessSettings @InternalApi private[pekko] (
* Note: How the sharded set is kept alive may change in the future meaning this setting may go away.
*/
def withKeepAliveInterval(keepAliveInterval: Duration): ShardedDaemonProcessSettings =
copy(keepAliveInterval = keepAliveInterval.asScala)
copy(keepAliveInterval = keepAliveInterval.toScala)
/**
* Specify sharding settings that should be used for the sharded daemon process instead of loading from config.

View file

@ -32,7 +32,7 @@ import pekko.actor.typed.scaladsl.Behaviors
import pekko.annotation.ApiMayChange
import pekko.cluster.sharding.typed.ShardingEnvelope
import pekko.cluster.sharding.typed.delivery.internal.ShardingProducerControllerImpl
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
/**
@ -185,9 +185,9 @@ object ShardingProducerController {
def apply(config: Config): Settings = {
new Settings(
bufferSize = config.getInt("buffer-size"),
config.getDuration("internal-ask-timeout").asScala,
config.getDuration("cleanup-unused-after").asScala,
config.getDuration("resend-first-unconfirmed-idle-timeout").asScala,
config.getDuration("internal-ask-timeout").toScala,
config.getDuration("cleanup-unused-after").toScala,
config.getDuration("resend-first-unconfirmed-idle-timeout").toScala,
ProducerController.Settings(config))
}
@ -223,19 +223,19 @@ object ShardingProducerController {
copy(internalAskTimeout = newInternalAskTimeout)
def withInternalAskTimeout(newInternalAskTimeout: java.time.Duration): Settings =
copy(internalAskTimeout = newInternalAskTimeout.asScala)
copy(internalAskTimeout = newInternalAskTimeout.toScala)
def withCleanupUnusedAfter(newCleanupUnusedAfter: FiniteDuration): Settings =
copy(cleanupUnusedAfter = newCleanupUnusedAfter)
def withCleanupUnusedAfter(newCleanupUnusedAfter: java.time.Duration): Settings =
copy(cleanupUnusedAfter = newCleanupUnusedAfter.asScala)
copy(cleanupUnusedAfter = newCleanupUnusedAfter.toScala)
def withResendFirstUnconfirmedIdleTimeout(newResendFirstUnconfirmedIdleTimeout: FiniteDuration): Settings =
copy(resendFirstUnconfirmedIdleTimeout = newResendFirstUnconfirmedIdleTimeout)
def withResendFirstUnconfirmedIdleTimeout(newResendFirstUnconfirmedIdleTimeout: java.time.Duration): Settings =
copy(resendFirstUnconfirmedIdleTimeout = newResendFirstUnconfirmedIdleTimeout.asScala)
copy(resendFirstUnconfirmedIdleTimeout = newResendFirstUnconfirmedIdleTimeout.toScala)
def withProducerControllerSettings(newProducerControllerSettings: ProducerController.Settings): Settings =
copy(producerControllerSettings = newProducerControllerSettings)

View file

@ -51,7 +51,7 @@ import pekko.pattern.PromiseActorRef
import pekko.pattern.StatusReply
import pekko.util.{ unused, ByteString, Timeout }
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* INTERNAL API
@ -346,13 +346,13 @@ import pekko.util.JavaDurationConverters._
}
override def ask[U](message: JFunction[ActorRef[U], M], timeout: Duration): CompletionStage[U] =
ask[U](replyTo => message.apply(replyTo))(timeout.asScala).asJava
ask[U](replyTo => message.apply(replyTo))(timeout.toScala).asJava
override def askWithStatus[Res](f: ActorRef[StatusReply[Res]] => M)(implicit timeout: Timeout): Future[Res] =
StatusReply.flattenStatusFuture(ask[StatusReply[Res]](f))
override def askWithStatus[Res](f: ActorRef[StatusReply[Res]] => M, timeout: Duration): CompletionStage[Res] =
askWithStatus(f.apply)(timeout.asScala).asJava
askWithStatus(f.apply)(timeout.toScala).asJava
/** Similar to [[pekko.actor.typed.scaladsl.AskPattern.PromiseRef]] but for an `EntityRef` target. */
@InternalApi

View file

@ -29,7 +29,7 @@ import pekko.cluster.sharding.typed.scaladsl
import pekko.japi.function.{ Function => JFunction }
import pekko.pattern.StatusReply
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
/**
@ -57,10 +57,10 @@ import pekko.util.Timeout
}
def ask[U](message: JFunction[ActorRef[U], M], timeout: Duration): CompletionStage[U] =
ask[U](replyTo => message.apply(replyTo))(timeout.asScala).asJava
ask[U](replyTo => message.apply(replyTo))(timeout.toScala).asJava
override def askWithStatus[Res](f: ActorRef[StatusReply[Res]] => M, timeout: Duration): CompletionStage[Res] =
askWithStatus(f)(timeout.asScala).asJava
askWithStatus(f)(timeout.toScala).asJava
override def askWithStatus[Res](f: ActorRef[StatusReply[Res]] => M)(implicit timeout: Timeout): Future[Res] =
StatusReply.flattenStatusFuture(ask(f))
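
The javadsl facades above accept a `java.time.Duration` and pass the converted value to a scaladsl method that expects an implicit `pekko.util.Timeout`. A hedged sketch of that boundary pattern follows; `queryScala` is a hypothetical method standing in for the scaladsl `ask`, not part of this diff:

```scala
import java.time.{ Duration => JDuration }
import scala.concurrent.Future
import scala.jdk.DurationConverters._
import org.apache.pekko.util.Timeout

// hypothetical scaladsl-side method that needs an implicit Timeout
def queryScala()(implicit timeout: Timeout): Future[String] =
  Future.successful("ok")

// javadsl-style overload: convert the Java duration once, at the API boundary
def queryJava(timeout: JDuration): Future[String] =
  queryScala()(Timeout(timeout.toScala))
```

Converting once at the facade keeps the scaladsl implementation free of `java.time` types.
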

View file

@ -23,7 +23,7 @@ import pekko.util.Timeout
import pekko.annotation.InternalApi
import pekko.pattern.AskTimeoutException
import pekko.util.ccompat.JavaConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
@ -38,7 +38,7 @@ private[pekko] object ClusterShardingHealthCheckSettings {
def apply(config: Config): ClusterShardingHealthCheckSettings =
new ClusterShardingHealthCheckSettings(
config.getStringList("names").asScala.toSet,
config.getDuration("timeout").asScala)
config.getDuration("timeout").toScala)
}
@ApiMayChange

View file

@ -22,7 +22,7 @@ import pekko.cluster.singleton.ClusterSingletonManagerSettings
import pekko.coordination.lease.LeaseUsageSettings
import pekko.japi.Util.immutableSeq
import pekko.util.Helpers.toRootLowerCase
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import com.typesafe.config.Config
import scala.collection.immutable
@ -104,7 +104,7 @@ object ClusterShardingSettings {
val lease = config.getString("use-lease") match {
case s if s.isEmpty => None
case other => Some(new LeaseUsageSettings(other, config.getDuration("lease-retry-interval").asScala))
case other => Some(new LeaseUsageSettings(other, config.getDuration("lease-retry-interval").toScala))
}
new ClusterShardingSettings(
@ -271,11 +271,11 @@ object ClusterShardingSettings {
def withTimeout(timeout: FiniteDuration): IdleSettings = copy(timeout = timeout)
def withTimeout(timeout: java.time.Duration): IdleSettings = withTimeout(timeout.asScala)
def withTimeout(timeout: java.time.Duration): IdleSettings = withTimeout(timeout.toScala)
def withInterval(interval: FiniteDuration): IdleSettings = copy(interval = Some(interval))
def withInterval(interval: java.time.Duration): IdleSettings = withInterval(interval.asScala)
def withInterval(interval: java.time.Duration): IdleSettings = withInterval(interval.toScala)
private def copy(timeout: FiniteDuration = timeout, interval: Option[FiniteDuration] = interval): IdleSettings =
new IdleSettings(timeout, interval)
@ -1019,7 +1019,7 @@ final class ClusterShardingSettings(
copy(shardRegionQueryTimeout = duration)
def withShardRegionQueryTimeout(duration: java.time.Duration): ClusterShardingSettings =
copy(shardRegionQueryTimeout = duration.asScala)
copy(shardRegionQueryTimeout = duration.toScala)
def withLeaseSettings(leaseSettings: LeaseUsageSettings): ClusterShardingSettings =
copy(leaseSettings = Some(leaseSettings))

View file

@ -39,7 +39,7 @@ import pekko.cluster.sharding.ShardRegion.ShardId
import pekko.event.Logging
import pekko.pattern.AskTimeoutException
import pekko.util.Timeout
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object ExternalShardAllocationStrategy {
@ -66,7 +66,7 @@ object ExternalShardAllocationStrategy {
*/
def create(systemProvider: ClassicActorSystemProvider, typeName: String, timeout: java.time.Duration)
: ExternalShardAllocationStrategy =
this.apply(systemProvider, typeName, timeout.asScala)
this.apply(systemProvider, typeName, timeout.toScala)
// local only messages
private[pekko] final case class GetShardLocation(shard: ShardId)

View file

@ -45,7 +45,7 @@ import pekko.dispatch.MessageDispatcher
import pekko.event.Logging
import pekko.pattern.ask
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.PrettyDuration._
import pekko.util.Timeout
import pekko.util.ccompat.JavaConverters._
@ -66,7 +66,7 @@ final private[external] class ExternalShardAllocationClientImpl(system: ActorSys
private val timeout =
system.settings.config
.getDuration("pekko.cluster.sharding.external-shard-allocation-strategy.client-timeout")
.asScala
.toScala
private implicit val askTimeout: Timeout = Timeout(timeout * 2)
private implicit val ec: MessageDispatcher = system.dispatchers.internalDispatcher

View file

@ -47,7 +47,7 @@ import pekko.event.Logging
import pekko.event.MarkerLoggingAdapter
import pekko.pattern.ask
import pekko.pattern.pipe
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
object ClusterSingletonManagerSettings {
@ -70,7 +70,7 @@ object ClusterSingletonManagerSettings {
val lease = config.getString("use-lease") match {
case s if s.isEmpty => None
case leaseConfigPath =>
Some(new LeaseUsageSettings(leaseConfigPath, config.getDuration("lease-retry-interval").asScala))
Some(new LeaseUsageSettings(leaseConfigPath, config.getDuration("lease-retry-interval").toScala))
}
new ClusterSingletonManagerSettings(
singletonName = config.getString("singleton-name"),

View file

@ -25,7 +25,7 @@ import pekko.annotation.InternalApi
import pekko.cluster.{ ddata => dd }
import pekko.cluster.ddata.ReplicatedData
import pekko.pattern.ask
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
/**
@ -84,7 +84,7 @@ import pekko.util.Timeout
case cmd: JReplicator.Get[d] =>
implicit val timeout: Timeout = Timeout(cmd.consistency.timeout match {
case java.time.Duration.ZERO => localAskTimeout
case t => t.asScala + additionalAskTimeout
case t => t.toScala + additionalAskTimeout
})
import ctx.executionContext
val reply =
@ -112,7 +112,7 @@ import pekko.util.Timeout
case cmd: JReplicator.Update[d] =>
implicit val timeout: Timeout = Timeout(cmd.writeConsistency.timeout match {
case java.time.Duration.ZERO => localAskTimeout
case t => t.asScala + additionalAskTimeout
case t => t.toScala + additionalAskTimeout
})
import ctx.executionContext
val reply =
@ -179,7 +179,7 @@ import pekko.util.Timeout
case cmd: JReplicator.Delete[d] =>
implicit val timeout: Timeout = Timeout(cmd.consistency.timeout match {
case java.time.Duration.ZERO => localAskTimeout
case t => t.asScala + additionalAskTimeout
case t => t.toScala + additionalAskTimeout
})
import ctx.executionContext
val reply =

View file

@ -27,7 +27,7 @@ import pekko.annotation.DoNotInherit
import pekko.annotation.InternalApi
import pekko.cluster.ddata.ReplicatedData
import pekko.cluster.ddata.SelfUniqueAddress
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object DistributedData extends ExtensionId[DistributedData] {
def get(system: ActorSystem[_]): DistributedData = apply(system)
@ -62,7 +62,7 @@ object DistributedData extends ExtensionId[DistributedData] {
new ReplicatorMessageAdapter[A, B](
context,
distributedData.replicator,
distributedData.unexpectedAskTimeout.asJava)
distributedData.unexpectedAskTimeout.toJava)
factory(replicatorAdapter)
}
}

View file

@ -27,7 +27,7 @@ import pekko.cluster.{ ddata => dd }
import pekko.cluster.ddata.Key
import pekko.cluster.ddata.ReplicatedData
import pekko.cluster.ddata.typed.internal.ReplicatorBehavior
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* @see [[pekko.cluster.ddata.Replicator]].
@ -66,18 +66,18 @@ object Replicator {
require(n >= 2, "ReadFrom n must be >= 2, use ReadLocal for n=1")
/** INTERNAL API */
@InternalApi private[pekko] override def toClassic = dd.Replicator.ReadFrom(n, timeout.asScala)
@InternalApi private[pekko] override def toClassic = dd.Replicator.ReadFrom(n, timeout.toScala)
}
final case class ReadMajority(timeout: Duration, minCap: Int = DefaultMajorityMinCap) extends ReadConsistency {
def this(timeout: Duration) = this(timeout, DefaultMajorityMinCap)
/** INTERNAL API */
@InternalApi private[pekko] override def toClassic = dd.Replicator.ReadMajority(timeout.asScala, minCap)
@InternalApi private[pekko] override def toClassic = dd.Replicator.ReadMajority(timeout.toScala, minCap)
}
final case class ReadAll(timeout: Duration) extends ReadConsistency {
/** INTERNAL API */
@InternalApi private[pekko] override def toClassic = dd.Replicator.ReadAll(timeout.asScala)
@InternalApi private[pekko] override def toClassic = dd.Replicator.ReadAll(timeout.toScala)
}
sealed trait WriteConsistency {
@ -96,18 +96,18 @@ object Replicator {
require(n >= 2, "WriteTo n must be >= 2, use WriteLocal for n=1")
/** INTERNAL API */
@InternalApi private[pekko] override def toClassic = dd.Replicator.WriteTo(n, timeout.asScala)
@InternalApi private[pekko] override def toClassic = dd.Replicator.WriteTo(n, timeout.toScala)
}
final case class WriteMajority(timeout: Duration, minCap: Int = DefaultMajorityMinCap) extends WriteConsistency {
def this(timeout: Duration) = this(timeout, DefaultMajorityMinCap)
/** INTERNAL API */
@InternalApi private[pekko] override def toClassic = dd.Replicator.WriteMajority(timeout.asScala, minCap)
@InternalApi private[pekko] override def toClassic = dd.Replicator.WriteMajority(timeout.toScala, minCap)
}
final case class WriteAll(timeout: Duration) extends WriteConsistency {
/** INTERNAL API */
@InternalApi private[pekko] override def toClassic = dd.Replicator.WriteAll(timeout.asScala)
@InternalApi private[pekko] override def toClassic = dd.Replicator.WriteAll(timeout.toScala)
}
/**

View file

@ -26,7 +26,7 @@ import pekko.actor.typed.ActorRef
import pekko.actor.typed.javadsl.ActorContext
import pekko.cluster.ddata.Key
import pekko.cluster.ddata.ReplicatedData
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
/**
@ -66,7 +66,7 @@ class ReplicatorMessageAdapter[A, B <: ReplicatedData](
replicator: ActorRef[Replicator.Command],
unexpectedAskTimeout: Duration) {
private implicit val askTimeout: Timeout = Timeout(unexpectedAskTimeout.asScala)
private implicit val askTimeout: Timeout = Timeout(unexpectedAskTimeout.toScala)
private var changedMessageAdapters: Map[Key[B], ActorRef[Replicator.SubscribeResponse[B]]] = Map.empty

View file

@ -28,7 +28,7 @@ import pekko.cluster.{ ddata => dd }
import pekko.cluster.Cluster
import pekko.cluster.ddata.ReplicatedData
import pekko.cluster.ddata.SelfUniqueAddress
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object DistributedData extends ExtensionId[DistributedData] {
def get(system: ActorSystem[_]): DistributedData = apply(system)
@ -85,7 +85,7 @@ class DistributedData(system: ActorSystem[_]) extends Extension {
@InternalApi private[pekko] val unexpectedAskTimeout: FiniteDuration =
system.settings.config
.getDuration("pekko.cluster.ddata.typed.replicator-message-adapter-unexpected-ask-timeout")
.asScala
.toScala
private val classicSystem = system.toClassic.asInstanceOf[ExtendedActorSystem]

View file

@ -25,7 +25,7 @@ import pekko.cluster.singleton.{
}
import pekko.cluster.typed.internal.AdaptedClusterSingletonImpl
import pekko.coordination.lease.LeaseUsageSettings
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import com.typesafe.config.Config
object ClusterSingletonSettings {
@ -73,13 +73,13 @@ final class ClusterSingletonSettings(
def withRemovalMargin(removalMargin: FiniteDuration): ClusterSingletonSettings = copy(removalMargin = removalMargin)
def withRemovalMargin(removalMargin: java.time.Duration): ClusterSingletonSettings =
withRemovalMargin(removalMargin.asScala)
withRemovalMargin(removalMargin.toScala)
def withHandoverRetryInterval(handOverRetryInterval: FiniteDuration): ClusterSingletonSettings =
copy(handOverRetryInterval = handOverRetryInterval)
def withHandoverRetryInterval(handOverRetryInterval: java.time.Duration): ClusterSingletonSettings =
withHandoverRetryInterval(handOverRetryInterval.asScala)
withHandoverRetryInterval(handOverRetryInterval.toScala)
def withBufferSize(bufferSize: Int): ClusterSingletonSettings = copy(bufferSize = bufferSize)
@ -237,7 +237,7 @@ object ClusterSingletonManagerSettings {
val lease = config.getString("use-lease") match {
case s if s.isEmpty => None
case leaseConfigPath =>
Some(new LeaseUsageSettings(leaseConfigPath, config.getDuration("lease-retry-interval").asScala))
Some(new LeaseUsageSettings(leaseConfigPath, config.getDuration("lease-retry-interval").toScala))
}
new ClusterSingletonManagerSettings(
singletonName = config.getString("singleton-name"),
@ -303,13 +303,13 @@ final class ClusterSingletonManagerSettings(
copy(removalMargin = removalMargin)
def withRemovalMargin(removalMargin: java.time.Duration): ClusterSingletonManagerSettings =
withRemovalMargin(removalMargin.asScala)
withRemovalMargin(removalMargin.toScala)
def withHandOverRetryInterval(retryInterval: FiniteDuration): ClusterSingletonManagerSettings =
copy(handOverRetryInterval = retryInterval)
def withHandOverRetryInterval(retryInterval: java.time.Duration): ClusterSingletonManagerSettings =
withHandOverRetryInterval(retryInterval.asScala)
withHandOverRetryInterval(retryInterval.toScala)
def withLeaseSettings(leaseSettings: LeaseUsageSettings) = copy(leaseSettings = Option(leaseSettings))

View file

@ -37,7 +37,7 @@ import pekko.stream.scaladsl.StreamRefs
import pekko.stream.testkit.TestSubscriber
import pekko.stream.testkit.scaladsl.TestSink
import pekko.testkit._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object StreamRefSpec extends MultiNodeConfig {
val first = role("first")
@ -266,7 +266,7 @@ abstract class StreamRefSpec extends MultiNodeClusterSpec(StreamRefSpec) with Im
// the subscription timeout for a failure
val timeout = system.settings.config
.getDuration("pekko.stream.materializer.stream-ref.subscription-timeout")
.asScala + 2.seconds
.toScala + 2.seconds
streamLifecycle3.expectMsg(timeout, "failed-system-42-tmp")
}

View file

@ -16,12 +16,12 @@ package org.apache.pekko.coordination.lease
import scala.concurrent.duration.FiniteDuration
import org.apache.pekko
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.PrettyDuration._
final class LeaseUsageSettings private[pekko] (val leaseImplementation: String,
val leaseRetryInterval: FiniteDuration) {
def getLeaseRetryInterval(): java.time.Duration = leaseRetryInterval.asJava
def getLeaseRetryInterval(): java.time.Duration = leaseRetryInterval.toJava
override def toString = s"LeaseUsageSettings($leaseImplementation, ${leaseRetryInterval.pretty})"
}
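
`LeaseUsageSettings` above shows the opposite direction: a Scala-side `FiniteDuration` exposed to Java callers through `.toJava`. A standalone sketch of the same pattern, with `RetrySettings` as an illustrative class that is not part of this diff:

```scala
import scala.concurrent.duration._
import scala.jdk.DurationConverters._

// illustrative settings class holding a FiniteDuration internally
final class RetrySettings(val retryInterval: FiniteDuration) {
  /** Java API */
  def getRetryInterval(): java.time.Duration = retryInterval.toJava
}

// round-tripping a FiniteDuration through java.time.Duration preserves its length
val lossless: Boolean = 5.seconds.toJava.toScala == 5.seconds
```
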

View file

@ -17,18 +17,18 @@ import scala.concurrent.duration._
import com.typesafe.config.{ Config, ConfigValueType }
import org.apache.pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object TimeoutSettings {
def apply(config: Config): TimeoutSettings = {
val heartBeatTimeout = config.getDuration("heartbeat-timeout").asScala
val heartBeatTimeout = config.getDuration("heartbeat-timeout").toScala
val heartBeatInterval = config.getValue("heartbeat-interval").valueType() match {
case ConfigValueType.STRING if config.getString("heartbeat-interval").isEmpty =>
(heartBeatTimeout / 10).max(5.seconds)
case _ => config.getDuration("heartbeat-interval").asScala
case _ => config.getDuration("heartbeat-interval").toScala
}
require(heartBeatInterval < (heartBeatTimeout / 2), "heartbeat-interval must be less than half heartbeat-timeout")
new TimeoutSettings(heartBeatInterval, heartBeatTimeout, config.getDuration("lease-operation-timeout").asScala)
new TimeoutSettings(heartBeatInterval, heartBeatTimeout, config.getDuration("lease-operation-timeout").toScala)
}
}
@ -41,37 +41,37 @@ final class TimeoutSettings(
/**
* Java API
*/
def getHeartbeatInterval(): java.time.Duration = heartbeatInterval.asJava
def getHeartbeatInterval(): java.time.Duration = heartbeatInterval.toJava
/**
* Java API
*/
def getHeartbeatTimeout(): java.time.Duration = heartbeatTimeout.asJava
def getHeartbeatTimeout(): java.time.Duration = heartbeatTimeout.toJava
/**
* Java API
*/
def getOperationTimeout(): java.time.Duration = operationTimeout.asJava
def getOperationTimeout(): java.time.Duration = operationTimeout.toJava
/**
* Java API
*/
def withHeartbeatInterval(heartbeatInterval: java.time.Duration): TimeoutSettings = {
copy(heartbeatInterval = heartbeatInterval.asScala)
copy(heartbeatInterval = heartbeatInterval.toScala)
}
/**
* Java API
*/
def withHeartbeatTimeout(heartbeatTimeout: java.time.Duration): TimeoutSettings = {
copy(heartbeatTimeout = heartbeatTimeout.asScala)
copy(heartbeatTimeout = heartbeatTimeout.toScala)
}
/**
* Java API
*/
def withOperationTimeout(operationTimeout: java.time.Duration): TimeoutSettings = {
copy(operationTimeout = operationTimeout.asScala)
copy(operationTimeout = operationTimeout.toScala)
}
def withHeartbeatInterval(heartbeatInterval: FiniteDuration): TimeoutSettings = {

View file

@ -68,7 +68,7 @@ import pekko.remote.RARP
import pekko.serialization.SerializationExtension
import pekko.util.ByteString
import pekko.util.Helpers.toRootLowerCase
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
object ReplicatorSettings {
@ -307,7 +307,7 @@ object Replicator {
/**
* Java API
*/
def this(n: Int, timeout: java.time.Duration) = this(n, timeout.asScala)
def this(n: Int, timeout: java.time.Duration) = this(n, timeout.toScala)
}
final case class ReadMajority(timeout: FiniteDuration, minCap: Int = DefaultMajorityMinCap) extends ReadConsistency {
def this(timeout: FiniteDuration) = this(timeout, DefaultMajorityMinCap)
@ -315,7 +315,7 @@ object Replicator {
/**
* Java API
*/
def this(timeout: java.time.Duration) = this(timeout.asScala, DefaultMajorityMinCap)
def this(timeout: java.time.Duration) = this(timeout.toScala, DefaultMajorityMinCap)
}
/**
@ -329,14 +329,14 @@ object Replicator {
/**
* Java API
*/
def this(timeout: java.time.Duration, additional: Int) = this(timeout.asScala, additional, DefaultMajorityMinCap)
def this(timeout: java.time.Duration, additional: Int) = this(timeout.toScala, additional, DefaultMajorityMinCap)
}
final case class ReadAll(timeout: FiniteDuration) extends ReadConsistency {
/**
* Java API
*/
def this(timeout: java.time.Duration) = this(timeout.asScala)
def this(timeout: java.time.Duration) = this(timeout.toScala)
}
sealed trait WriteConsistency {
@ -351,7 +351,7 @@ object Replicator {
/**
* Java API
*/
def this(n: Int, timeout: java.time.Duration) = this(n, timeout.asScala)
def this(n: Int, timeout: java.time.Duration) = this(n, timeout.toScala)
}
final case class WriteMajority(timeout: FiniteDuration, minCap: Int = DefaultMajorityMinCap)
extends WriteConsistency {
@ -360,7 +360,7 @@ object Replicator {
/**
* Java API
*/
def this(timeout: java.time.Duration) = this(timeout.asScala, DefaultMajorityMinCap)
def this(timeout: java.time.Duration) = this(timeout.toScala, DefaultMajorityMinCap)
}
/**
@ -374,14 +374,14 @@ object Replicator {
/**
* Java API
*/
def this(timeout: java.time.Duration, additional: Int) = this(timeout.asScala, additional, DefaultMajorityMinCap)
def this(timeout: java.time.Duration, additional: Int) = this(timeout.toScala, additional, DefaultMajorityMinCap)
}
final case class WriteAll(timeout: FiniteDuration) extends WriteConsistency {
/**
* Java API
*/
def this(timeout: java.time.Duration) = this(timeout.asScala)
def this(timeout: java.time.Duration) = this(timeout.toScala)
}
/**

View file

@ -21,7 +21,7 @@ import pekko.Done
import pekko.actor.ClassicActorSystemProvider
import pekko.persistence.testkit.scaladsl
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* Test utility to initialize persistence plugins. Useful when initialization order or coordination
@ -49,6 +49,6 @@ object PersistenceInit {
journalPluginId: String,
snapshotPluginId: String,
timeout: Duration): CompletionStage[Done] =
scaladsl.PersistenceInit.initializePlugins(system, journalPluginId, snapshotPluginId, timeout.asScala).asJava
scaladsl.PersistenceInit.initializePlugins(system, journalPluginId, snapshotPluginId, timeout.toScala).asJava
}

View file

@ -22,7 +22,7 @@ import pekko.actor.ActorSystem
import pekko.annotation.ApiMayChange
import pekko.persistence.testkit.{ EventStorage, ExpectedFailure, ExpectedRejection, JournalOperation }
import pekko.persistence.testkit.scaladsl.{ PersistenceTestKit => ScalaTestKit }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.ccompat.JavaConverters._
/**
@ -42,7 +42,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
* Check for `max` time that nothing has been saved in the storage.
*/
def expectNothingPersisted(persistenceId: String, max: Duration): Unit =
scalaTestkit.expectNothingPersisted(persistenceId, max.asScala)
scalaTestkit.expectNothingPersisted(persistenceId, max.toScala)
/**
* Check that `event` has been saved in the storage.
@ -54,7 +54,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
* Check for `max` time that `event` has been saved in the storage.
*/
def expectNextPersisted[A](persistenceId: String, event: A, max: Duration): A =
scalaTestkit.expectNextPersisted(persistenceId, event, max.asScala)
scalaTestkit.expectNextPersisted(persistenceId, event, max.toScala)
/**
* Check that next persisted in storage for particular persistence id event has expected type.
@ -66,7 +66,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
* Check for `max` time that next persisted in storage for particular persistence id event has expected type.
*/
def expectNextPersistedClass[A](persistenceId: String, cla: Class[A], max: Duration): A =
scalaTestkit.expectNextPersistedClass(persistenceId, cla, max.asScala)
scalaTestkit.expectNextPersistedClass(persistenceId, cla, max.toScala)
/**
* Fail next `n` write operations with the `cause` exception for particular persistence id.
@ -201,7 +201,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) {
* Receive for `max` time next n events from the storage.
*/
def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A], max: Duration): JList[A] =
scalaTestkit.receivePersisted(persistenceId, n, cla, max.asScala).asJava
scalaTestkit.receivePersisted(persistenceId, n, cla, max.toScala).asJava
/**
* Reject next n save in storage operations for particular persistence id with `cause` exception.

View file

@ -23,7 +23,7 @@ import pekko.annotation.ApiMayChange
import pekko.japi.Pair
import pekko.persistence.testkit.{ ExpectedFailure, SnapshotMeta, SnapshotOperation, SnapshotStorage }
import pekko.persistence.testkit.scaladsl.{ SnapshotTestKit => ScalaTestKit }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.ccompat.JavaConverters._
/**
@ -43,7 +43,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) {
* Check for `max` time that nothing has been saved in the storage.
*/
def expectNothingPersisted(persistenceId: String, max: Duration): Unit =
scalaTestkit.expectNothingPersisted(persistenceId, max.asScala)
scalaTestkit.expectNothingPersisted(persistenceId, max.toScala)
/**
* Check that `snapshot` has been saved in the storage.
@ -55,7 +55,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) {
* Check for `max` time that `snapshot` has been saved in the storage.
*/
def expectNextPersisted[A](persistenceId: String, snapshot: A, max: Duration): A =
scalaTestkit.expectNextPersisted(persistenceId, snapshot, max.asScala)
scalaTestkit.expectNextPersisted(persistenceId, snapshot, max.toScala)
/**
* Check that next persisted in storage for particular persistence id snapshot has expected type.
@ -67,7 +67,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) {
* Check for `max` time that next persisted in storage for particular persistence id snapshot has expected type.
*/
def expectNextPersistedClass[A](persistenceId: String, cla: Class[A], max: Duration): A =
scalaTestkit.expectNextPersistedClass[A](persistenceId, cla, max.asScala)
scalaTestkit.expectNextPersistedClass[A](persistenceId, cla, max.toScala)
/**
* Fail next `n` write operations with the `cause` exception for particular persistence id.
@ -202,7 +202,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) {
* Receive for `max` time next `n` snapshots that have been persisted in the storage.
*/
def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A], max: Duration): JList[A] =
scalaTestkit.receivePersisted[A](persistenceId, n, cla, max.asScala).asJava
scalaTestkit.receivePersisted[A](persistenceId, n, cla, max.toScala).asJava
/**
* Persist `snapshots` with metadata into storage in order.

View file

@ -32,7 +32,7 @@ import pekko.persistence.typed.delivery.EventSourcedProducerQueue.CleanupTick
import pekko.persistence.typed.scaladsl.Effect
import pekko.persistence.typed.scaladsl.EventSourcedBehavior
import pekko.persistence.typed.scaladsl.RetentionCriteria
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* [[pekko.actor.typed.delivery.DurableProducerQueue]] that can be used with [[pekko.actor.typed.delivery.ProducerController]]
@ -61,11 +61,11 @@ object EventSourcedProducerQueue {
*/
def apply(config: Config): Settings = {
new Settings(
restartMaxBackoff = config.getDuration("restart-max-backoff").asScala,
restartMaxBackoff = config.getDuration("restart-max-backoff").toScala,
snapshotEvery = config.getInt("snapshot-every"),
keepNSnapshots = config.getInt("keep-n-snapshots"),
deleteEvents = config.getBoolean("delete-events"),
cleanupUnusedAfter = config.getDuration("cleanup-unused-after").asScala,
cleanupUnusedAfter = config.getDuration("cleanup-unused-after").toScala,
journalPluginId = config.getString("journal-plugin-id"),
snapshotPluginId = config.getString("snapshot-plugin-id"))
}
@ -113,13 +113,13 @@ object EventSourcedProducerQueue {
* Java API
*/
def withRestartMaxBackoff(newRestartMaxBackoff: JavaDuration): Settings =
copy(restartMaxBackoff = newRestartMaxBackoff.asScala)
copy(restartMaxBackoff = newRestartMaxBackoff.toScala)
/**
* Java API
*/
def getRestartMaxBackoff(): JavaDuration =
restartMaxBackoff.asJava
restartMaxBackoff.toJava
/**
* Scala API
@ -131,13 +131,13 @@ object EventSourcedProducerQueue {
* Java API
*/
def withCleanupUnusedAfter(newCleanupUnusedAfter: JavaDuration): Settings =
copy(cleanupUnusedAfter = newCleanupUnusedAfter.asScala)
copy(cleanupUnusedAfter = newCleanupUnusedAfter.toScala)
/**
* Java API
*/
def getCleanupUnusedAfter(): JavaDuration =
cleanupUnusedAfter.asJava
cleanupUnusedAfter.toJava
def withJournalPluginId(id: String): Settings =
copy(journalPluginId = id)

View file

@ -19,7 +19,7 @@ import java.util.Optional
import org.apache.pekko
import pekko.japi.function.Function3
import pekko.persistence.typed.SnapshotAdapter
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
/**
@ -37,5 +37,5 @@ object PersistentFSMMigration {
*/
def snapshotAdapter[State](adapt: Function3[String, Any, Optional[Duration], State]): SnapshotAdapter[State] =
pekko.persistence.typed.scaladsl.PersistentFSMMigration.snapshotAdapter((stateId, snapshot, timer) =>
adapt.apply(stateId, snapshot, timer.map(_.asJava).toJava))
adapt.apply(stateId, snapshot, timer.map(_.toJava).toJava))
}

View file

@ -28,7 +28,6 @@ import pekko.annotation.InternalApi
import pekko.persistence.{ PersistentActor, RecoveryCompleted, SnapshotOffer }
import pekko.persistence.fsm.PersistentFSM.FSMState
import pekko.persistence.serialization.Message
import pekko.util.JavaDurationConverters
/**
* SnapshotAfter Extension Id and factory for creating SnapshotAfter extension
@ -430,8 +429,8 @@ object PersistentFSM {
* Use Duration.Inf to deactivate an existing timeout.
*/
def forMax(timeout: java.time.Duration): State[S, D, E] = {
import JavaDurationConverters._
forMax(timeout.asScala)
import scala.jdk.DurationConverters._
forMax(timeout.toScala)
}
/**

View file

@ -23,7 +23,7 @@ import pekko.japi.function.{ Effect, Function2, Predicate, Predicate2, Procedure
import pekko.actor._
import pekko.japi.pf.{ FSMTransitionHandlerBuilder, UnitMatch, UnitPFBuilder }
import pekko.routing.{ Deafen, Listen, Listeners }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.unused
/**
@ -1118,7 +1118,7 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* in the mailbox when the new timer was started.
*/
def startTimerWithFixedDelay(name: String, msg: Any, delay: java.time.Duration): Unit =
startTimerWithFixedDelay(name, msg, delay.asScala)
startTimerWithFixedDelay(name, msg, delay.toScala)
/**
* Schedules a message to be sent repeatedly to the `self` actor with a
@ -1146,7 +1146,7 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* in the mailbox when the new timer was started.
*/
def startTimerAtFixedRate(name: String, msg: Any, interval: java.time.Duration): Unit =
startTimerAtFixedRate(name, msg, interval.asScala)
startTimerAtFixedRate(name, msg, interval.toScala)
/**
* Start a timer that will send `msg` once to the `self` actor after
@ -1158,7 +1158,7 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* in the mailbox when the new timer was started.
*/
def startSingleTimer(name: String, msg: Any, delay: java.time.Duration): Unit =
startSingleTimer(name, msg, delay.asScala)
startSingleTimer(name, msg, delay.toScala)
/**
* Schedule named timer to deliver message after given delay, possibly repeating.

View file

@ -26,7 +26,7 @@ import com.fasterxml.jackson.datatype.jsr310.ser.DurationSerializer
import org.apache.pekko
import pekko.annotation.InternalApi
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* INTERNAL API: Adds support for serializing and deserializing [[FiniteDuration]].
@ -51,7 +51,7 @@ import pekko.util.JavaDurationConverters._
@InternalApi private[pekko] class FiniteDurationSerializer
extends StdScalarSerializer[FiniteDuration](classOf[FiniteDuration]) {
override def serialize(value: FiniteDuration, jgen: JsonGenerator, provider: SerializerProvider): Unit = {
DurationSerializer.INSTANCE.serialize(value.asJava, jgen, provider)
DurationSerializer.INSTANCE.serialize(value.toJava, jgen, provider)
}
}
@ -69,6 +69,6 @@ import pekko.util.JavaDurationConverters._
extends StdScalarDeserializer[FiniteDuration](classOf[FiniteDuration]) {
def deserialize(jp: JsonParser, ctxt: DeserializationContext): FiniteDuration = {
DurationDeserializer.INSTANCE.deserialize(jp, ctxt).asScala
DurationDeserializer.INSTANCE.deserialize(jp, ctxt).toScala
}
}

View file

@ -34,7 +34,7 @@ import pekko.stream._
import pekko.stream.impl._
import pekko.testkit.{ TestActor, TestProbe }
import pekko.testkit.TestActor.AutoPilot
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.ccompat.JavaConverters._
import org.reactivestreams.{ Publisher, Subscriber, Subscription }
@ -196,7 +196,7 @@ object TestPublisher {
* Expect no messages for a given duration.
* @since 1.1.0
*/
def expectNoMessage(max: java.time.Duration): Self = expectNoMessage(max.asScala)
def expectNoMessage(max: java.time.Duration): Self = expectNoMessage(max.toScala)
/**
* Receive messages for a given duration or until one does not match a given partial function.
@ -218,7 +218,7 @@ object TestPublisher {
idle: java.time.Duration,
messages: Int,
f: PartialFunction[PublisherEvent, T]): java.util.List[T] =
receiveWhile(max.asScala, idle.asScala, messages)(f).asJava
receiveWhile(max.toScala, idle.toScala, messages)(f).asJava
def expectEventPF[T](f: PartialFunction[PublisherEvent, T]): T =
executeAfterSubscription {
@ -270,7 +270,7 @@ object TestPublisher {
def within[T](min: java.time.Duration,
max: java.time.Duration,
creator: function.Creator[T]): T =
within(min.asScala, max.asScala)(creator.create())
within(min.toScala, max.toScala)(creator.create())
/**
* Same as calling `within(0 seconds, max)(f)`.
@ -286,7 +286,7 @@ object TestPublisher {
* @since 1.1.0
*/
def within[T](max: java.time.Duration,
creator: function.Creator[T]): T = within(max.asScala)(creator.create())
creator: function.Creator[T]): T = within(max.toScala)(creator.create())
}
object Probe {
@ -453,7 +453,7 @@ object TestSubscriber {
* Expect and return [[SubscriberEvent]] (any of: `OnSubscribe`, `OnNext`, `OnError` or `OnComplete`).
* @since 1.1.0
*/
def expectEvent(max: java.time.Duration): SubscriberEvent = expectEvent(max.asScala)
def expectEvent(max: java.time.Duration): SubscriberEvent = expectEvent(max.toScala)
/**
* Fluent DSL
@ -490,7 +490,7 @@ object TestSubscriber {
* Expect and return a stream element during specified time or timeout.
* @since 1.1.0
*/
def expectNext(d: java.time.Duration): I = expectNext(d.asScala)
def expectNext(d: java.time.Duration): I = expectNext(d.toScala)
/**
* Fluent DSL
@ -520,7 +520,7 @@ object TestSubscriber {
* Expect a stream element during specified time or timeout.
* @since 1.1.0
*/
def expectNext(d: java.time.Duration, element: I): Self = expectNext(d.asScala, element)
def expectNext(d: java.time.Duration, element: I): Self = expectNext(d.toScala, element)
/**
* Fluent DSL
@ -790,7 +790,7 @@ object TestSubscriber {
* Java API: Assert that no message is received for the specified time.
*/
def expectNoMessage(remaining: java.time.Duration): Self = {
probe.expectNoMessage(remaining.asScala)
probe.expectNoMessage(remaining.toScala)
self
}
@ -821,7 +821,7 @@ object TestSubscriber {
* @since 1.1.0
*/
def expectNextWithTimeoutPF[T](max: java.time.Duration, f: PartialFunction[Any, T]): T =
expectEventWithTimeoutPF(max.asScala, f)
expectEventWithTimeoutPF(max.toScala, f)
/**
* Expect a stream element during specified time or timeout and test it with partial function.
@ -844,7 +844,7 @@ object TestSubscriber {
* @since 1.1.0
*/
def expectNextChainingPF(max: java.time.Duration, f: PartialFunction[Any, Any]): Self =
expectNextChainingPF(max.asScala, f)
expectNextChainingPF(max.toScala, f)
/**
* Expect a stream element during specified time or timeout and test it with partial function.
@ -862,7 +862,7 @@ object TestSubscriber {
* @since 1.1.0
*/
def expectEventWithTimeoutPF[T](max: java.time.Duration, f: PartialFunction[SubscriberEvent, T]): T =
expectEventWithTimeoutPF(max.asScala, f)
expectEventWithTimeoutPF(max.toScala, f)
def expectEventPF[T](f: PartialFunction[SubscriberEvent, T]): T =
expectEventWithTimeoutPF(Duration.Undefined, f)
@ -887,7 +887,7 @@ object TestSubscriber {
idle: java.time.Duration,
messages: Int,
f: PartialFunction[SubscriberEvent, T]): java.util.List[T] =
receiveWhile(max.asScala, idle.asScala, messages)(f).asJava
receiveWhile(max.toScala, idle.toScala, messages)(f).asJava
/**
* Drains a given number of messages
@ -907,7 +907,7 @@ object TestSubscriber {
* @since 1.1.0
*/
def receiveWithin(max: java.time.Duration, messages: Int): java.util.List[I] =
receiveWithin(max.asScala, messages).asJava
receiveWithin(max.toScala, messages).asJava
/**
* Attempt to drain the stream into a strict collection (by requesting `Long.MaxValue` elements).
@ -948,7 +948,7 @@ object TestSubscriber {
* @since 1.1.0
*/
def toStrict(atMost: java.time.Duration): java.util.List[I] =
toStrict(atMost.asScala).asJava
toStrict(atMost.toScala).asJava
/**
* Execute code block while bounding its execution time between `min` and
@ -990,7 +990,7 @@ object TestSubscriber {
*/
def within[T](min: java.time.Duration,
max: java.time.Duration,
creator: function.Creator[T]): T = within(min.asScala, max.asScala)(creator.create())
creator: function.Creator[T]): T = within(min.toScala, max.toScala)(creator.create())
/**
* Same as calling `within(0 seconds, max)(f)`.
@ -1003,7 +1003,7 @@ object TestSubscriber {
* Same as calling `within(Duration.ofSeconds(0), max)(f)`.
* @since 1.1.0
*/
def within[T](max: java.time.Duration)(creator: function.Creator[T]): T = within(max.asScala)(creator.create())
def within[T](max: java.time.Duration)(creator: function.Creator[T]): T = within(max.toScala)(creator.create())
def onSubscribe(subscription: Subscription): Unit = probe.ref ! OnSubscribe(subscription)
def onNext(element: I): Unit = probe.ref ! OnNext(element)
@ -1087,7 +1087,7 @@ object TestSubscriber {
* Request and expect a stream element during the specified time or timeout.
* @since 1.1.0
*/
def requestNext(d: java.time.Duration): T = requestNext(d.asScala)
def requestNext(d: java.time.Duration): T = requestNext(d.toScala)
}
}

View file

@ -21,7 +21,8 @@ import pekko.japi.Pair
import pekko.japi.function
import pekko.pattern.StatusReply
import pekko.stream.javadsl.Flow
import pekko.util.JavaDurationConverters
import scala.jdk.DurationConverters.JavaDurationOps
/**
* Collection of Flows aimed at integrating with typed Actors.
@ -71,7 +72,7 @@ object ActorFlow {
makeMessage: function.Function2[I, ActorRef[A], Q]): Flow[I, A, NotUsed] =
org.apache.pekko.stream.typed.scaladsl.ActorFlow
.ask[I, Q, A](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))(
JavaDurationConverters.asFiniteDuration(timeout))
timeout.toScala)
.asJava
/**
@ -85,7 +86,7 @@ object ActorFlow {
makeMessage: function.Function2[I, ActorRef[StatusReply[A]], Q]): Flow[I, A, NotUsed] =
org.apache.pekko.stream.typed.scaladsl.ActorFlow
.askWithStatus[I, Q, A](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))(
JavaDurationConverters.asFiniteDuration(timeout))
timeout.toScala)
.asJava
/**
@ -157,7 +158,7 @@ object ActorFlow {
.via(
org.apache.pekko.stream.typed.scaladsl.ActorFlow
.askWithContext[I, Q, A, Ctx](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))(
JavaDurationConverters.asFiniteDuration(timeout))
timeout.toScala)
.map { case (a, ctx) => Pair(a, ctx) })
.asJava
@ -176,7 +177,7 @@ object ActorFlow {
.via(
org.apache.pekko.stream.typed.scaladsl.ActorFlow
.askWithStatusAndContext[I, Q, A, Ctx](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))(
JavaDurationConverters.asFiniteDuration(timeout))
timeout.toScala)
.map { case (a, ctx) => Pair(a, ctx) })
.asJava
@ -194,7 +195,7 @@ object ActorFlow {
.via(
org.apache.pekko.stream.typed.scaladsl.ActorFlow
.askWithContext[I, Q, A, Ctx](parallelism)(ref)((i, ref) => makeMessage(i, ref))(
JavaDurationConverters.asFiniteDuration(timeout))
timeout.toScala)
.map { case (a, ctx) => Pair(a, ctx) })
.asJava
}
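
The `ActorFlow` change above imports only `scala.jdk.DurationConverters.JavaDurationOps`, bringing just the Java-to-Scala direction into implicit scope rather than the full wildcard. A minimal sketch of that narrower import:

```scala
// only the java.time.Duration -> FiniteDuration enrichment is in implicit scope here
import scala.jdk.DurationConverters.JavaDurationOps
import scala.concurrent.duration.FiniteDuration

def toFinite(timeout: java.time.Duration): FiniteDuration =
  timeout.toScala
```
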

View file

@ -30,7 +30,7 @@ import pekko.event.Logging
import pekko.japi.function
import pekko.stream.impl.TraversalBuilder
import pekko.util.{ ByteString, OptionVal }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.LineNumbers
import scala.jdk.OptionConverters._
@ -483,7 +483,7 @@ object Attributes {
* but you might still be able observe a long delay at the ultimate source.
*/
@ApiMayChange
def afterDelay(delay: java.time.Duration, strategy: Strategy): Strategy = AfterDelay(delay.asScala, strategy)
def afterDelay(delay: java.time.Duration, strategy: Strategy): Strategy = AfterDelay(delay.toScala, strategy)
}
/**
@ -834,7 +834,7 @@ object ActorAttributes {
* Java API: Defines a timeout for stream subscription and what action to take when that hits.
*/
def streamSubscriptionTimeout(timeout: Duration, mode: StreamSubscriptionTimeoutTerminationMode): Attributes =
streamSubscriptionTimeout(timeout.asScala, mode)
streamSubscriptionTimeout(timeout.toScala, mode)
/**
* Maximum number of elements emitted in batch if downstream signals large demand.
@ -926,7 +926,7 @@ object StreamRefAttributes {
/**
* Java API: Specifies the subscription timeout within which the remote side MUST subscribe to the handed out stream reference.
*/
def subscriptionTimeout(timeout: Duration): Attributes = subscriptionTimeout(timeout.asScala)
def subscriptionTimeout(timeout: Duration): Attributes = subscriptionTimeout(timeout.toScala)
/**
* Specifies the size of the buffer on the receiving side that is eagerly filled even without demand.
@ -943,7 +943,7 @@ object StreamRefAttributes {
* Java API: If no new elements arrive within this timeout, demand is redelivered.
*/
def demandRedeliveryInterval(timeout: Duration): Attributes =
demandRedeliveryInterval(timeout.asScala)
demandRedeliveryInterval(timeout.toScala)
/**
* Scala API: The time between the Terminated signal being received and when the local SourceRef determines to fail itself
@ -955,6 +955,6 @@ object StreamRefAttributes {
* Java API: The time between the Terminated signal being received and when the local SourceRef determines to fail itself
*/
def finalTerminationSignalDeadline(timeout: Duration): Attributes =
finalTerminationSignalDeadline(timeout.asScala)
finalTerminationSignalDeadline(timeout.toScala)
}

View file

@ -20,7 +20,7 @@ import pekko.japi.function
import pekko.event.Logging
import pekko.event.Logging.LogLevel
import pekko.util.ConstantFun
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
final class RestartSettings private (
val minBackoff: FiniteDuration,
@ -35,13 +35,13 @@ final class RestartSettings private (
def withMinBackoff(value: FiniteDuration): RestartSettings = copy(minBackoff = value)
/** Java API: minimum (initial) duration until the child actor will started again, if it is terminated */
def withMinBackoff(value: java.time.Duration): RestartSettings = copy(minBackoff = value.asScala)
def withMinBackoff(value: java.time.Duration): RestartSettings = copy(minBackoff = value.toScala)
/** Scala API: the exponential back-off is capped to this duration */
def withMaxBackoff(value: FiniteDuration): RestartSettings = copy(maxBackoff = value)
/** Java API: the exponential back-off is capped to this duration */
def withMaxBackoff(value: java.time.Duration): RestartSettings = copy(maxBackoff = value.asScala)
def withMaxBackoff(value: java.time.Duration): RestartSettings = copy(maxBackoff = value.toScala)
/**
* After calculation of the exponential back-off an additional random delay based on this factor is added
@ -55,7 +55,7 @@ final class RestartSettings private (
/** Java API: The amount of restarts is capped to `count` within a timeframe of `within` */
def withMaxRestarts(count: Int, within: java.time.Duration): RestartSettings =
copy(maxRestarts = count, maxRestartsWithin = within.asScala)
copy(maxRestarts = count, maxRestartsWithin = within.toScala)
/** Decides whether the failure should restart the stream or make the surrounding stream fail */
def withRestartOn(restartOn: function.Predicate[Throwable]): RestartSettings =
@ -100,11 +100,11 @@ object RestartSettings {
/** Java API */
def create(minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double): RestartSettings =
new RestartSettings(
minBackoff = minBackoff.asScala,
maxBackoff = maxBackoff.asScala,
minBackoff = minBackoff.toScala,
maxBackoff = maxBackoff.toScala,
randomFactor = randomFactor,
maxRestarts = Int.MaxValue,
maxRestartsWithin = minBackoff.asScala,
maxRestartsWithin = minBackoff.toScala,
logSettings = LogSettings.defaultSettings,
restartOn = ConstantFun.anyToTrue)

View file

@ -29,7 +29,7 @@ import pekko.annotation.InternalApi
import pekko.dispatch.Dispatchers
import pekko.pattern.ask
import pekko.stream.impl.MaterializerGuardian
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.Timeout
/**
@ -59,7 +59,7 @@ final class SystemMaterializer(system: ExtendedActorSystem) extends Extension {
private[pekko] val materializerSettings = ActorMaterializerSettings(system)
private implicit val materializerTimeout: Timeout =
system.settings.config.getDuration("pekko.stream.materializer.creation-timeout").asScala
system.settings.config.getDuration("pekko.stream.materializer.creation-timeout").toScala
@InternalApi @nowarn("msg=deprecated")
private val materializerGuardian = system.systemActorOf(

View file

@ -101,8 +101,8 @@ object BidiFlow {
* the *joint* frequencies of the elements in both directions.
*/
def bidirectionalIdleTimeout[I, O](timeout: java.time.Duration): BidiFlow[I, I, O, O, NotUsed] = {
import pekko.util.JavaDurationConverters._
new BidiFlow(scaladsl.BidiFlow.bidirectionalIdleTimeout(timeout.asScala))
import scala.jdk.DurationConverters._
new BidiFlow(scaladsl.BidiFlow.bidirectionalIdleTimeout(timeout.toScala))
}
}

View file

@ -18,7 +18,7 @@ import scala.concurrent.duration.FiniteDuration
import org.apache.pekko
import pekko.annotation.InternalApi
import pekko.stream.scaladsl
import pekko.util.JavaDurationConverters.JavaDurationOps
import scala.jdk.DurationConverters._
/**
* Allows to manage delay and can be stateful to compute delay for any sequence of elements,
@ -38,7 +38,7 @@ object DelayStrategy {
/** INTERNAL API */
@InternalApi
private[javadsl] def asScala[T](delayStrategy: DelayStrategy[T]) = new scaladsl.DelayStrategy[T] {
override def nextDelay(elem: T): FiniteDuration = delayStrategy.nextDelay(elem).asScala
override def nextDelay(elem: T): FiniteDuration = delayStrategy.nextDelay(elem).toScala
}
/**

View file

@ -36,7 +36,7 @@ import pekko.stream.{ javadsl, _ }
import pekko.stream.impl.fusing.{ StatefulMapConcat, ZipWithIndexJava }
import pekko.util.ConstantFun
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
import pekko.util.Timeout
import pekko.util.unused
@ -1628,7 +1628,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* IllegalArgumentException is thrown.
*/
def groupedWithin(maxNumber: Int, duration: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] =
new Flow(delegate.groupedWithin(maxNumber, duration.asScala).map(_.asJava)) // TODO optimize to one step
new Flow(delegate.groupedWithin(maxNumber, duration.toScala).map(_.asJava)) // TODO optimize to one step
/**
* Chunk up this stream into groups of elements received within a time window,
@ -1652,7 +1652,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
maxWeight: Long,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] =
new Flow(delegate.groupedWeightedWithin(maxWeight, duration.asScala)(costFn.apply).map(_.asJava))
new Flow(delegate.groupedWeightedWithin(maxWeight, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Chunk up this stream into groups of elements received within a time window,
@ -1678,7 +1678,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
maxNumber: Int,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] =
new Flow(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.asScala)(costFn.apply).map(_.asJava))
new Flow(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Shifts elements emission in time by a specified amount. It allows to store elements
@ -1706,7 +1706,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param strategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
def delay(of: java.time.Duration, strategy: DelayOverflowStrategy): Flow[In, Out, Mat] =
new Flow(delegate.delay(of.asScala, strategy))
new Flow(delegate.delay(of.toScala, strategy))
/**
* Shifts elements emission in time by an amount individually determined through delay strategy a specified amount.
@ -1771,7 +1771,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def dropWithin(duration: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.dropWithin(duration.asScala))
new Flow(delegate.dropWithin(duration.toScala))
/**
* Terminate processing (and cancel the upstream publisher) after predicate
@ -2222,7 +2222,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* See also [[Flow.limit]], [[Flow.limitWeighted]]
*/
def takeWithin(duration: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.takeWithin(duration.asScala))
new Flow(delegate.takeWithin(duration.toScala))
/**
* Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
@ -3750,7 +3750,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def initialTimeout(timeout: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.initialTimeout(timeout.asScala))
new Flow(delegate.initialTimeout(timeout.toScala))
/**
* If the completion of the stream does not happen until the provided timeout, the stream is failed
@ -3765,7 +3765,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def completionTimeout(timeout: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.completionTimeout(timeout.asScala))
new Flow(delegate.completionTimeout(timeout.toScala))
/**
* If the time between two processed elements exceeds the provided timeout, the stream is failed
@ -3781,7 +3781,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def idleTimeout(timeout: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.idleTimeout(timeout.asScala))
new Flow(delegate.idleTimeout(timeout.toScala))
/**
* If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
@ -3797,7 +3797,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def backpressureTimeout(timeout: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.backpressureTimeout(timeout.asScala))
new Flow(delegate.backpressureTimeout(timeout.toScala))
/**
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
@ -3817,7 +3817,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def keepAlive(maxIdle: java.time.Duration, injectedElem: function.Creator[Out]): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.keepAlive(maxIdle.asScala, () => injectedElem.create()))
new Flow(delegate.keepAlive(maxIdle.toScala, () => injectedElem.create()))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -3849,7 +3849,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def throttle(elements: Int, per: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(elements, per.asScala))
new Flow(delegate.throttle(elements, per.toScala))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -3891,7 +3891,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
per: java.time.Duration,
maximumBurst: Int,
mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(elements, per.asScala, maximumBurst, mode))
new Flow(delegate.throttle(elements, per.toScala, maximumBurst, mode))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -3928,7 +3928,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
cost: Int,
per: java.time.Duration,
costCalculation: function.Function[Out, Integer]): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(cost, per.asScala, costCalculation.apply))
new Flow(delegate.throttle(cost, per.toScala, costCalculation.apply))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -3974,7 +3974,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
maximumBurst: Int,
costCalculation: function.Function[Out, Integer],
mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode))
new Flow(delegate.throttle(cost, per.toScala, maximumBurst, costCalculation.apply, mode))
/**
* Detaches upstream demand from downstream demand without detaching the
@ -4034,7 +4034,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*/
def initialDelay(delay: java.time.Duration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.initialDelay(delay.asScala))
new Flow(delegate.initialDelay(delay.toScala))
/**
* Replace the attributes of this [[Flow]] with the given ones. If this Flow is a composite
@ -4314,7 +4314,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = Option(emitOnTimer).map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
})
.asJava
}
@ -4348,7 +4348,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = emitOnTimer.toScala.map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
})
.asJava
}
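The javadsl `Flow` changes in this file all follow one shape: the Java-facing method accepts `java.time.Duration`, converts it with `.toScala` at the boundary, and delegates to the scaladsl counterpart that works with `FiniteDuration`. A stripped-down sketch of that wrapper pattern (`ScalaCore` and `JavaFacade` are illustrative names, not Pekko classes):

```scala
// Illustrative wrapper pattern used by the javadsl hunks above: convert the
// Java duration once at the API boundary, keep FiniteDuration internally.
import scala.concurrent.duration.FiniteDuration
import scala.jdk.DurationConverters._

final class ScalaCore {
  def takeWithin(duration: FiniteDuration): String = s"takeWithin($duration)"
}

final class JavaFacade(delegate: ScalaCore) {
  def takeWithin(duration: java.time.Duration): String =
    delegate.takeWithin(duration.toScala) // was duration.asScala
}

object JavaFacadeSketch extends App {
  println(new JavaFacade(new ScalaCore).takeWithin(java.time.Duration.ofSeconds(5)))
}
```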

View file

@ -25,7 +25,7 @@ import pekko.japi.{ function, Pair }
import pekko.stream._
import pekko.util.ConstantFun
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
import pekko.util.ccompat.JavaConverters._
@ -381,7 +381,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat](
* @see [[pekko.stream.javadsl.Flow.throttle]]
*/
def throttle(elements: Int, per: java.time.Duration): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
viaScala(_.throttle(elements, per.asScala))
viaScala(_.throttle(elements, per.toScala))
/**
* Context-preserving variant of [[pekko.stream.javadsl.Flow.throttle]].
@ -393,7 +393,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat](
per: java.time.Duration,
maximumBurst: Int,
mode: ThrottleMode): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
viaScala(_.throttle(elements, per.asScala, maximumBurst, mode))
viaScala(_.throttle(elements, per.toScala, maximumBurst, mode))
/**
* Context-preserving variant of [[pekko.stream.javadsl.Flow.throttle]].
@ -404,7 +404,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat](
cost: Int,
per: java.time.Duration,
costCalculation: function.Function[Out, Integer]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
viaScala(_.throttle(cost, per.asScala, costCalculation.apply))
viaScala(_.throttle(cost, per.toScala, costCalculation.apply))
/**
* Context-preserving variant of [[pekko.stream.javadsl.Flow.throttle]].
@ -417,7 +417,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat](
maximumBurst: Int,
costCalculation: function.Function[Out, Integer],
mode: ThrottleMode): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
viaScala(_.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode))
viaScala(_.throttle(cost, per.toScala, maximumBurst, costCalculation.apply, mode))
def asScala: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
scaladsl.FlowWithContext.fromTuples(

View file

@ -19,7 +19,7 @@ import org.apache.pekko
import pekko.annotation.ApiMayChange
import pekko.japi.Pair
import pekko.stream.scaladsl
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
object RetryFlow {
@ -54,7 +54,7 @@ object RetryFlow {
flow: Flow[In, Out, Mat],
decideRetry: pekko.japi.function.Function2[In, Out, Optional[In]]): Flow[In, Out, Mat] =
scaladsl.RetryFlow
.withBackoff[In, Out, Mat](minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRetries, flow.asScala) {
.withBackoff[In, Out, Mat](minBackoff.toScala, maxBackoff.toScala, randomFactor, maxRetries, flow.asScala) {
(in, out) =>
decideRetry.apply(in, out).toScala
}
@ -95,8 +95,8 @@ object RetryFlow {
: FlowWithContext[In, InCtx, Out, OutCtx, Mat] =
scaladsl.RetryFlow
.withBackoffAndContext[In, InCtx, Out, OutCtx, Mat](
minBackoff.asScala,
maxBackoff.asScala,
minBackoff.toScala,
maxBackoff.toScala,
randomFactor,
maxRetries,
flow.asScala) { (in, out) =>

View file

@ -35,7 +35,7 @@ import pekko.stream.impl.{ LinearTraversalBuilder, UnfoldAsyncJava, UnfoldJava }
import pekko.stream.impl.fusing.{ ArraySource, StatefulMapConcat, ZipWithIndexJava }
import pekko.util.{ unused, _ }
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
import pekko.util.ccompat.JavaConverters._
@ -229,7 +229,7 @@ object Source {
* receive new tick elements as soon as it has requested more elements.
*/
def tick[O](initialDelay: java.time.Duration, interval: java.time.Duration, tick: O): javadsl.Source[O, Cancellable] =
new Source(scaladsl.Source.tick(initialDelay.asScala, interval.asScala, tick))
new Source(scaladsl.Source.tick(initialDelay.toScala, interval.toScala, tick))
/**
* Create a `Source` with one element.
@ -3333,7 +3333,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
def groupedWithin(
maxNumber: Int,
duration: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
new Source(delegate.groupedWithin(maxNumber, duration.asScala).map(_.asJava)) // TODO optimize to one step
new Source(delegate.groupedWithin(maxNumber, duration.toScala).map(_.asJava)) // TODO optimize to one step
/**
* Chunk up this stream into groups of elements received within a time window,
@ -3357,7 +3357,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
maxWeight: Long,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
new Source(delegate.groupedWeightedWithin(maxWeight, duration.asScala)(costFn.apply).map(_.asJava))
new Source(delegate.groupedWeightedWithin(maxWeight, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Chunk up this stream into groups of elements received within a time window,
@ -3383,7 +3383,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
maxNumber: Int,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
new Source(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.asScala)(costFn.apply).map(_.asJava))
new Source(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Shifts elements emission in time by a specified amount. It allows to store elements
@ -3411,7 +3411,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param strategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
def delay(of: java.time.Duration, strategy: DelayOverflowStrategy): Source[Out, Mat] =
new Source(delegate.delay(of.asScala, strategy))
new Source(delegate.delay(of.toScala, strategy))
/**
* Shifts elements emission in time by an amount individually determined through delay strategy a specified amount.
@ -3476,7 +3476,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def dropWithin(duration: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.dropWithin(duration.asScala))
new Source(delegate.dropWithin(duration.toScala))
/**
* Terminate processing (and cancel the upstream publisher) after predicate
@ -3598,7 +3598,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels or timer fires
*/
def takeWithin(duration: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.takeWithin(duration.asScala))
new Source(delegate.takeWithin(duration.toScala))
/**
* Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
@ -4212,7 +4212,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def initialTimeout(timeout: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.initialTimeout(timeout.asScala))
new Source(delegate.initialTimeout(timeout.toScala))
/**
* If the completion of the stream does not happen until the provided timeout, the stream is failed
@ -4227,7 +4227,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def completionTimeout(timeout: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.completionTimeout(timeout.asScala))
new Source(delegate.completionTimeout(timeout.toScala))
/**
* If the time between two processed elements exceeds the provided timeout, the stream is failed
@ -4243,7 +4243,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def idleTimeout(timeout: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.idleTimeout(timeout.asScala))
new Source(delegate.idleTimeout(timeout.toScala))
/**
* If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
@ -4259,7 +4259,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def backpressureTimeout(timeout: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.backpressureTimeout(timeout.asScala))
new Source(delegate.backpressureTimeout(timeout.toScala))
/**
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
@ -4279,7 +4279,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def keepAlive(maxIdle: java.time.Duration, injectedElem: function.Creator[Out]): javadsl.Source[Out, Mat] =
new Source(delegate.keepAlive(maxIdle.asScala, () => injectedElem.create()))
new Source(delegate.keepAlive(maxIdle.toScala, () => injectedElem.create()))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -4311,7 +4311,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def throttle(elements: Int, per: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(elements, per.asScala))
new Source(delegate.throttle(elements, per.toScala))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -4353,7 +4353,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
per: java.time.Duration,
maximumBurst: Int,
mode: ThrottleMode): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(elements, per.asScala, maximumBurst, mode))
new Source(delegate.throttle(elements, per.toScala, maximumBurst, mode))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -4390,7 +4390,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
cost: Int,
per: java.time.Duration,
costCalculation: function.Function[Out, Integer]): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(cost, per.asScala, costCalculation.apply _))
new Source(delegate.throttle(cost, per.toScala, costCalculation.apply _))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -4436,7 +4436,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
maximumBurst: Int,
costCalculation: function.Function[Out, Integer],
mode: ThrottleMode): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply _, mode))
new Source(delegate.throttle(cost, per.toScala, maximumBurst, costCalculation.apply _, mode))
/**
* Detaches upstream demand from downstream demand without detaching the
@ -4495,7 +4495,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*/
def initialDelay(delay: java.time.Duration): javadsl.Source[Out, Mat] =
new Source(delegate.initialDelay(delay.asScala))
new Source(delegate.initialDelay(delay.toScala))
/**
* Replace the attributes of this [[Source]] with the given ones. If this Source is a composite
@ -4755,7 +4755,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = Option(emitOnTimer).map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
})
.asJava
}
@ -4788,7 +4788,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = emitOnTimer.toScala.map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
})
.asJava
}

View file

@ -27,7 +27,7 @@ import pekko.japi.function
import pekko.stream._
import pekko.util.ConstantFun
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
import pekko.util.ccompat.JavaConverters._
@ -365,7 +365,7 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon
* @see [[pekko.stream.javadsl.Source.throttle]]
*/
def throttle(elements: Int, per: java.time.Duration): SourceWithContext[Out, Ctx, Mat] =
viaScala(_.throttle(elements, per.asScala))
viaScala(_.throttle(elements, per.toScala))
/**
* Context-preserving variant of [[pekko.stream.javadsl.Source.throttle]].
@ -377,7 +377,7 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon
per: java.time.Duration,
maximumBurst: Int,
mode: ThrottleMode): SourceWithContext[Out, Ctx, Mat] =
viaScala(_.throttle(elements, per.asScala, maximumBurst, mode))
viaScala(_.throttle(elements, per.toScala, maximumBurst, mode))
/**
* Context-preserving variant of [[pekko.stream.javadsl.Source.throttle]].
@ -388,7 +388,7 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon
cost: Int,
per: java.time.Duration,
costCalculation: function.Function[Out, Integer]): SourceWithContext[Out, Ctx, Mat] =
viaScala(_.throttle(cost, per.asScala, costCalculation.apply))
viaScala(_.throttle(cost, per.toScala, costCalculation.apply))
/**
* Context-preserving variant of [[pekko.stream.javadsl.Source.throttle]].
@ -401,7 +401,7 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon
maximumBurst: Int,
costCalculation: function.Function[Out, Integer],
mode: ThrottleMode): SourceWithContext[Out, Ctx, Mat] =
viaScala(_.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode))
viaScala(_.throttle(cost, per.toScala, maximumBurst, costCalculation.apply, mode))
/**
* Connect this [[pekko.stream.javadsl.SourceWithContext]] to a [[pekko.stream.javadsl.Sink]],

View file

@ -25,7 +25,7 @@ import pekko.stream.IOResult
import pekko.stream.scaladsl.SinkToCompletionStage
import pekko.stream.scaladsl.SourceToCompletionStage
import pekko.util.ByteString
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
/**
* Converters for interacting with the blocking `java.io` streams APIs and Java 8 Streams
@ -101,7 +101,7 @@ object StreamConverters {
* @param readTimeout the max time the read operation on the materialized InputStream should block
*/
def asInputStream(readTimeout: java.time.Duration): Sink[ByteString, InputStream] =
new Sink(scaladsl.StreamConverters.asInputStream(readTimeout.asScala))
new Sink(scaladsl.StreamConverters.asInputStream(readTimeout.toScala))
/**
* Creates a Source from an [[java.io.InputStream]] created by the given function.
@ -157,7 +157,7 @@ object StreamConverters {
* @param writeTimeout the max time the write operation on the materialized OutputStream should block
*/
def asOutputStream(writeTimeout: java.time.Duration): javadsl.Source[ByteString, OutputStream] =
new Source(scaladsl.StreamConverters.asOutputStream(writeTimeout.asScala))
new Source(scaladsl.StreamConverters.asOutputStream(writeTimeout.toScala))
/**
* Creates a Source which when materialized will return an [[java.io.OutputStream]] which it is possible

View file

@ -29,7 +29,7 @@ import pekko.stream._
import pekko.stream.impl.fusing.{ StatefulMapConcat, ZipWithIndexJava }
import pekko.util.ConstantFun
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
import pekko.util.ccompat.JavaConverters._
@ -1023,7 +1023,7 @@ final class SubFlow[In, Out, Mat](
def groupedWithin(
maxNumber: Int,
duration: java.time.Duration): SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
new SubFlow(delegate.groupedWithin(maxNumber, duration.asScala).map(_.asJava)) // TODO optimize to one step
new SubFlow(delegate.groupedWithin(maxNumber, duration.toScala).map(_.asJava)) // TODO optimize to one step
/**
* Chunk up this stream into groups of elements received within a time window,
@ -1047,7 +1047,7 @@ final class SubFlow[In, Out, Mat](
maxWeight: Long,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
new SubFlow(delegate.groupedWeightedWithin(maxWeight, duration.asScala)(costFn.apply).map(_.asJava))
new SubFlow(delegate.groupedWeightedWithin(maxWeight, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Chunk up this stream into groups of elements received within a time window,
@ -1073,7 +1073,7 @@ final class SubFlow[In, Out, Mat](
maxNumber: Int,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
new SubFlow(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.asScala)(costFn.apply).map(_.asJava))
new SubFlow(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Shifts elements emission in time by a specified amount. It allows to store elements
@ -1101,7 +1101,7 @@ final class SubFlow[In, Out, Mat](
* @param strategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
def delay(of: java.time.Duration, strategy: DelayOverflowStrategy): SubFlow[In, Out, Mat] =
new SubFlow(delegate.delay(of.asScala, strategy))
new SubFlow(delegate.delay(of.toScala, strategy))
/**
* Shifts elements emission in time by an amount individually determined through delay strategy a specified amount.
@ -1166,7 +1166,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def dropWithin(duration: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.dropWithin(duration.asScala))
new SubFlow(delegate.dropWithin(duration.toScala))
/**
* Terminate processing (and cancel the upstream publisher) after predicate
@ -1453,7 +1453,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels or timer fires
*/
def takeWithin(duration: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.takeWithin(duration.asScala))
new SubFlow(delegate.takeWithin(duration.toScala))
/**
* Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
@ -2305,7 +2305,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def initialTimeout(timeout: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.initialTimeout(timeout.asScala))
new SubFlow(delegate.initialTimeout(timeout.toScala))
/**
* If the completion of the stream does not happen until the provided timeout, the stream is failed
@ -2320,7 +2320,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def completionTimeout(timeout: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.completionTimeout(timeout.asScala))
new SubFlow(delegate.completionTimeout(timeout.toScala))
/**
* If the time between two processed elements exceeds the provided timeout, the stream is failed
@ -2336,7 +2336,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def idleTimeout(timeout: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.idleTimeout(timeout.asScala))
new SubFlow(delegate.idleTimeout(timeout.toScala))
/**
* If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
@ -2352,7 +2352,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def backpressureTimeout(timeout: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.backpressureTimeout(timeout.asScala))
new SubFlow(delegate.backpressureTimeout(timeout.toScala))
/**
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
@ -2372,7 +2372,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def keepAlive(maxIdle: java.time.Duration, injectedElem: function.Creator[Out]): SubFlow[In, Out, Mat] =
new SubFlow(delegate.keepAlive(maxIdle.asScala, () => injectedElem.create()))
new SubFlow(delegate.keepAlive(maxIdle.toScala, () => injectedElem.create()))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -2404,7 +2404,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def throttle(elements: Int, per: java.time.Duration): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(elements, per.asScala))
new SubFlow(delegate.throttle(elements, per.toScala))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -2446,7 +2446,7 @@ final class SubFlow[In, Out, Mat](
per: java.time.Duration,
maximumBurst: Int,
mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(elements, per.asScala, maximumBurst, mode))
new SubFlow(delegate.throttle(elements, per.toScala, maximumBurst, mode))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -2483,7 +2483,7 @@ final class SubFlow[In, Out, Mat](
cost: Int,
per: java.time.Duration,
costCalculation: function.Function[Out, Integer]): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(cost, per.asScala, costCalculation.apply))
new SubFlow(delegate.throttle(cost, per.toScala, costCalculation.apply))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -2529,7 +2529,7 @@ final class SubFlow[In, Out, Mat](
maximumBurst: Int,
costCalculation: function.Function[Out, Integer],
mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode))
new SubFlow(delegate.throttle(cost, per.toScala, maximumBurst, costCalculation.apply, mode))
/**
* Detaches upstream demand from downstream demand without detaching the
@ -2557,7 +2557,7 @@ final class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*/
def initialDelay(delay: java.time.Duration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.initialDelay(delay.asScala))
new SubFlow(delegate.initialDelay(delay.toScala))
/**
* Change the attributes of this [[Source]] to the given ones and seal the list
@ -2797,7 +2797,7 @@ final class SubFlow[In, Out, Mat](
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = Option(emitOnTimer).map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
}))
}
@ -2830,7 +2830,7 @@ final class SubFlow[In, Out, Mat](
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = emitOnTimer.toScala.map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
}))
}

View file

@ -29,7 +29,7 @@ import pekko.stream._
import pekko.stream.impl.fusing.{ StatefulMapConcat, ZipWithIndexJava }
import pekko.util.ConstantFun
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
import pekko.util.ccompat.JavaConverters._
@ -1009,7 +1009,7 @@ final class SubSource[Out, Mat](
def groupedWithin(
maxNumber: Int,
duration: java.time.Duration): SubSource[java.util.List[Out @uncheckedVariance], Mat] =
new SubSource(delegate.groupedWithin(maxNumber, duration.asScala).map(_.asJava)) // TODO optimize to one step
new SubSource(delegate.groupedWithin(maxNumber, duration.toScala).map(_.asJava)) // TODO optimize to one step
/**
* Chunk up this stream into groups of elements received within a time window,
@ -1033,7 +1033,7 @@ final class SubSource[Out, Mat](
maxWeight: Long,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] =
new SubSource(delegate.groupedWeightedWithin(maxWeight, duration.asScala)(costFn.apply).map(_.asJava))
new SubSource(delegate.groupedWeightedWithin(maxWeight, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Chunk up this stream into groups of elements received within a time window,
@ -1059,7 +1059,7 @@ final class SubSource[Out, Mat](
maxNumber: Int,
costFn: function.Function[Out, java.lang.Long],
duration: java.time.Duration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] =
new SubSource(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.asScala)(costFn.apply).map(_.asJava))
new SubSource(delegate.groupedWeightedWithin(maxWeight, maxNumber, duration.toScala)(costFn.apply).map(_.asJava))
/**
* Discard the given number of elements at the beginning of the stream.
@ -1088,7 +1088,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def dropWithin(duration: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.dropWithin(duration.asScala))
new SubSource(delegate.dropWithin(duration.toScala))
/**
* Terminate processing (and cancel the upstream publisher) after predicate
@ -1193,7 +1193,7 @@ final class SubSource[Out, Mat](
* @param strategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
def delay(of: java.time.Duration, strategy: DelayOverflowStrategy): SubSource[Out, Mat] =
new SubSource(delegate.delay(of.asScala, strategy))
new SubSource(delegate.delay(of.toScala, strategy))
/**
* Shifts elements emission in time by an amount individually determined through delay strategy a specified amount.
@ -1428,7 +1428,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels or timer fires
*/
def takeWithin(duration: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.takeWithin(duration.asScala))
new SubSource(delegate.takeWithin(duration.toScala))
/**
* Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
@ -2279,7 +2279,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def initialTimeout(timeout: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.initialTimeout(timeout.asScala))
new SubSource(delegate.initialTimeout(timeout.toScala))
/**
* If the completion of the stream does not happen until the provided timeout, the stream is failed
@ -2294,7 +2294,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def completionTimeout(timeout: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.completionTimeout(timeout.asScala))
new SubSource(delegate.completionTimeout(timeout.toScala))
/**
* If the time between two processed elements exceeds the provided timeout, the stream is failed
@ -2310,7 +2310,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def idleTimeout(timeout: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.idleTimeout(timeout.asScala))
new SubSource(delegate.idleTimeout(timeout.toScala))
/**
* If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
@ -2326,7 +2326,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def backpressureTimeout(timeout: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.backpressureTimeout(timeout.asScala))
new SubSource(delegate.backpressureTimeout(timeout.toScala))
/**
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
@ -2346,7 +2346,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def keepAlive(maxIdle: java.time.Duration, injectedElem: function.Creator[Out]): SubSource[Out, Mat] =
new SubSource(delegate.keepAlive(maxIdle.asScala, () => injectedElem.create()))
new SubSource(delegate.keepAlive(maxIdle.toScala, () => injectedElem.create()))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -2378,7 +2378,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def throttle(elements: Int, per: java.time.Duration): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(elements, per.asScala))
new SubSource(delegate.throttle(elements, per.toScala))
/**
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
@ -2420,7 +2420,7 @@ final class SubSource[Out, Mat](
per: java.time.Duration,
maximumBurst: Int,
mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(elements, per.asScala, maximumBurst, mode))
new SubSource(delegate.throttle(elements, per.toScala, maximumBurst, mode))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -2457,7 +2457,7 @@ final class SubSource[Out, Mat](
cost: Int,
per: java.time.Duration,
costCalculation: function.Function[Out, Integer]): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(cost, per.asScala, costCalculation.apply _))
new SubSource(delegate.throttle(cost, per.toScala, costCalculation.apply _))
/**
* Sends elements downstream with speed limited to `cost/per`. Cost is
@ -2503,7 +2503,7 @@ final class SubSource[Out, Mat](
maximumBurst: Int,
costCalculation: function.Function[Out, Integer],
mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply _, mode))
new SubSource(delegate.throttle(cost, per.toScala, maximumBurst, costCalculation.apply _, mode))
/**
* Detaches upstream demand from downstream demand without detaching the
@ -2531,7 +2531,7 @@ final class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*/
def initialDelay(delay: java.time.Duration): SubSource[Out, Mat] =
new SubSource(delegate.initialDelay(delay.asScala))
new SubSource(delegate.initialDelay(delay.toScala))
/**
* Change the attributes of this [[Source]] to the given ones and seal the list
@ -2771,7 +2771,7 @@ final class SubSource[Out, Mat](
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = Option(emitOnTimer).map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
}))
}
@ -2804,7 +2804,7 @@ final class SubSource[Out, Mat](
aggregate = (agg, out) => aggregate.apply(agg, out).toScala,
harvest = agg => harvest.apply(agg),
emitOnTimer = emitOnTimer.toScala.map {
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala)
case Pair(predicate, duration) => (agg => predicate.test(agg), duration.toScala)
}))
}
}

View file

@ -40,7 +40,7 @@ import pekko.stream.TLSClosing
import pekko.stream.scaladsl
import pekko.util.ByteString
import scala.jdk.FutureConverters._
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import scala.jdk.OptionConverters._
object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider {
@ -355,6 +355,6 @@ class Tcp(system: ExtendedActorSystem) extends pekko.actor.Extension {
}
private def optionalDurationToScala(duration: Optional[java.time.Duration]) = {
if (duration.isPresent) duration.get.asScala else Duration.Inf
if (duration.isPresent) duration.get.toScala else Duration.Inf
}
}
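The private helper above maps an absent `Optional` to `Duration.Inf`. In isolation, the pattern looks like the following sketch (not the Pekko source):

```scala
// Sketch of the helper in the Tcp hunk: Optional.empty() means "no idle
// timeout", which maps to Duration.Inf on the Scala side.
import java.util.Optional
import scala.concurrent.duration.Duration
import scala.jdk.DurationConverters._

object OptionalDurationSketch extends App {
  def optionalDurationToScala(duration: Optional[java.time.Duration]): Duration =
    if (duration.isPresent) duration.get.toScala else Duration.Inf

  println(optionalDurationToScala(Optional.of(java.time.Duration.ofMillis(250)))) // 250 milliseconds
  println(optionalDurationToScala(Optional.empty()))                              // Duration.Inf
}
```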

View file

@ -42,7 +42,7 @@ import pekko.stream.impl.io.ConnectionSourceStage
import pekko.stream.impl.io.OutgoingConnectionStage
import pekko.stream.impl.io.TcpIdleTimeout
import pekko.util.ByteString
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.unused
object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider {
@ -114,7 +114,7 @@ final class Tcp(system: ExtendedActorSystem) extends pekko.actor.Extension {
// TODO maybe this should be a new setting, like `pekko.stream.tcp.bind.timeout` / `shutdown-timeout` instead?
val bindShutdownTimeout: FiniteDuration =
system.settings.config.getDuration("pekko.stream.materializer.subscription-timeout.timeout").asScala
system.settings.config.getDuration("pekko.stream.materializer.subscription-timeout.timeout").toScala
/**
* Creates a [[Tcp.ServerBinding]] instance which represents a prospective TCP server binding on the given `endpoint`.

View file

@ -1759,8 +1759,8 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap
* adding the new timer.
*/
final protected def scheduleOnce(timerKey: Any, delay: java.time.Duration): Unit = {
import pekko.util.JavaDurationConverters._
scheduleOnce(timerKey, delay.asScala)
import scala.jdk.DurationConverters._
scheduleOnce(timerKey, delay.toScala)
}
/**
@ -1793,8 +1793,8 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap
timerKey: Any,
initialDelay: java.time.Duration,
interval: java.time.Duration): Unit = {
import pekko.util.JavaDurationConverters._
scheduleWithFixedDelay(timerKey, initialDelay.asScala, interval.asScala)
import scala.jdk.DurationConverters._
scheduleWithFixedDelay(timerKey, initialDelay.toScala, interval.toScala)
}
/**
@ -1827,8 +1827,8 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap
timerKey: Any,
initialDelay: java.time.Duration,
interval: java.time.Duration): Unit = {
import pekko.util.JavaDurationConverters._
scheduleAtFixedRate(timerKey, initialDelay.asScala, interval.asScala)
import scala.jdk.DurationConverters._
scheduleAtFixedRate(timerKey, initialDelay.toScala, interval.toScala)
}
/**

View file

@ -23,7 +23,7 @@ import org.apache.pekko
import pekko.actor._
import pekko.annotation.InternalApi
import pekko.testkit.{ TestActor, TestDuration, TestProbe }
import pekko.util.JavaDurationConverters._
import scala.jdk.DurationConverters._
import pekko.util.ccompat.JavaConverters._
/**
@ -77,7 +77,7 @@ class TestKit(system: ActorSystem) {
/**
* Java timeouts (durations) during tests with the configured
*/
def dilated(duration: java.time.Duration): java.time.Duration = duration.asScala.dilated(getSystem).asJava
def dilated(duration: java.time.Duration): java.time.Duration = duration.toScala.dilated(getSystem).toJava
/**
* Query queue status.
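The `dilated` hunk above converts to `FiniteDuration`, applies the test-time dilation, and converts back. The sketch below imitates that `toScala ... toJava` round trip with a plain multiplication factor standing in for the configured `pekko.test.timefactor`; it is not the TestKit implementation:

```scala
// Imitation of the round trip in TestKit.dilated; timeFactor stands in for
// the value TestDuration reads from "pekko.test.timefactor".
import scala.concurrent.duration.Duration
import scala.jdk.DurationConverters._

object DilatedSketch extends App {
  def dilated(duration: java.time.Duration, timeFactor: Double): java.time.Duration =
    Duration.fromNanos((duration.toScala.toNanos * timeFactor).round).toJava

  println(dilated(java.time.Duration.ofSeconds(3), 2.0)) // PT6S
}
```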
@ -144,20 +144,20 @@ class TestKit(system: ActorSystem) {
* block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this
* call.
*/
def getRemaining: java.time.Duration = tp.remaining.asJava
def getRemaining: java.time.Duration = tp.remaining.toJava
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or missing that it returns the given duration.
*/
def getRemainingOr(duration: java.time.Duration): java.time.Duration = tp.remainingOr(duration.asScala).asJava
def getRemainingOr(duration: java.time.Duration): java.time.Duration = tp.remainingOr(duration.toScala).toJava
/**
* Obtain time remaining for execution of the innermost enclosing `within`
* block or missing that it returns the properly dilated default for this
* case from settings (key "pekko.test.single-expect-default").
*/
def getRemainingOrDefault: java.time.Duration = tp.remainingOrDefault.asJava
def getRemainingOrDefault: java.time.Duration = tp.remainingOrDefault.toJava
/**
* Execute code block while bounding its execution time between `min` and
@ -178,7 +178,7 @@ class TestKit(system: ActorSystem) {
* }}}
*/
def within[T](min: java.time.Duration, max: java.time.Duration, f: Supplier[T]): T =
tp.within(min.asScala, max.asScala)(f.get)
tp.within(min.toScala, max.toScala)(f.get)
/**
* Execute code block while bounding its execution time between `min` and
@ -198,7 +198,7 @@ class TestKit(system: ActorSystem) {
*
* }}}
*/
def within[T](max: java.time.Duration, f: Supplier[T]): T = tp.within(max.asScala)(f.get)
def within[T](max: java.time.Duration, f: Supplier[T]): T = tp.within(max.toScala)(f.get)
/**
* Await until the given condition evaluates to `true` or the timeout
@ -222,7 +222,7 @@ class TestKit(system: ActorSystem) {
* Note that the timeout is scaled using Duration.dilated,
* which uses the configuration entry "pekko.test.timefactor".
*/
def awaitCond(max: java.time.Duration, p: Supplier[Boolean]): Unit = tp.awaitCond(p.get, max.asScala)
def awaitCond(max: java.time.Duration, p: Supplier[Boolean]): Unit = tp.awaitCond(p.get, max.toScala)
/**
* Await until the given condition evaluates to `true` or the timeout
@ -235,7 +235,7 @@ class TestKit(system: ActorSystem) {
* which uses the configuration entry "pekko.test.timefactor".
*/
def awaitCond(max: java.time.Duration, interval: java.time.Duration, p: Supplier[Boolean]): Unit =
tp.awaitCond(p.get, max.asScala, interval.asScala)
tp.awaitCond(p.get, max.toScala, interval.toScala)
/**
* Await until the given condition evaluates to `true` or the timeout
@ -248,7 +248,7 @@ class TestKit(system: ActorSystem) {
* which uses the configuration entry "pekko.test.timefactor".
*/
def awaitCond(max: java.time.Duration, interval: java.time.Duration, message: String, p: Supplier[Boolean]): Unit =
tp.awaitCond(p.get, max.asScala, interval.asScala, message)
tp.awaitCond(p.get, max.toScala, interval.toScala, message)
/**
* Evaluate the given assert every `interval` until it does not throw an exception and return the
@ -276,7 +276,7 @@ class TestKit(system: ActorSystem) {
* Note that the timeout is scaled using Duration.dilated,
* which uses the configuration entry "pekko.test.timefactor".
*/
def awaitAssert[A](max: java.time.Duration, a: Supplier[A]): A = tp.awaitAssert(a.get, max.asScala)
def awaitAssert[A](max: java.time.Duration, a: Supplier[A]): A = tp.awaitAssert(a.get, max.toScala)
/**
* Evaluate the given assert every `interval` until it does not throw an exception.
@ -288,7 +288,7 @@ class TestKit(system: ActorSystem) {
* @return an arbitrary value that would be returned from awaitAssert if successful, if not interested in such value you can return null.
*/
def awaitAssert[A](max: java.time.Duration, interval: java.time.Duration, a: Supplier[A]): A =
tp.awaitAssert(a.get, max.asScala, interval.asScala)
tp.awaitAssert(a.get, max.toScala, interval.toScala)
/**
* Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor.
@ -302,7 +302,7 @@ class TestKit(system: ActorSystem) {
*
* @return the received object
*/
def expectMsgEquals[T](max: java.time.Duration, obj: T): T = tp.expectMsg(max.asScala, obj)
def expectMsgEquals[T](max: java.time.Duration, obj: T): T = tp.expectMsg(max.toScala, obj)
/**
* Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor.
@ -314,7 +314,7 @@ class TestKit(system: ActorSystem) {
* given object. Wait time is bounded by the given duration, with an
* AssertionFailure being thrown in case of timeout.
*/
def expectMsg[T](max: java.time.Duration, obj: T): T = tp.expectMsg(max.asScala, obj)
def expectMsg[T](max: java.time.Duration, obj: T): T = tp.expectMsg(max.toScala, obj)
/**
* Receive one message from the test actor and assert that it equals the
@ -347,7 +347,7 @@ class TestKit(system: ActorSystem) {
* processing.
*/
def expectMsgPF[T](max: java.time.Duration, hint: String, f: JFunction[Any, T]): T = {
tp.expectMsgPF(max.asScala, hint)(new CachingPartialFunction[Any, T] {
tp.expectMsgPF(max.toScala, hint)(new CachingPartialFunction[Any, T] {
@throws(classOf[Exception])
override def `match`(x: Any): T = f.apply(x)
})
@ -363,7 +363,7 @@ class TestKit(system: ActorSystem) {
* the given class. Wait time is bounded by the given duration, with an
* AssertionFailure being thrown in case of timeout.
*/
def expectMsgClass[T](max: java.time.Duration, c: Class[T]): T = tp.expectMsgClass(max.asScala, c)
def expectMsgClass[T](max: java.time.Duration, c: Class[T]): T = tp.expectMsgClass(max.toScala, c)
/**
* Same as `expectMsgAnyOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor.
@ -377,7 +377,7 @@ class TestKit(system: ActorSystem) {
* AssertionFailure being thrown in case of timeout.
*/
@varargs
def expectMsgAnyOfWithin[T](max: java.time.Duration, objs: T*): T = tp.expectMsgAnyOf(max.asScala, objs: _*)
def expectMsgAnyOfWithin[T](max: java.time.Duration, objs: T*): T = tp.expectMsgAnyOf(max.toScala, objs: _*)
/**
* Same as `expectMsgAllOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor.
@ -394,7 +394,7 @@ class TestKit(system: ActorSystem) {
*/
@varargs
def expectMsgAllOfWithin[T](max: java.time.Duration, objs: T*): JList[T] =
tp.expectMsgAllOf(max.asScala, objs: _*).asJava
tp.expectMsgAllOf(max.toScala, objs: _*).asJava
/**
* Same as `expectMsgAnyClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor.
@ -409,7 +409,7 @@ class TestKit(system: ActorSystem) {
*/
@varargs
def expectMsgAnyClassOf[T](max: java.time.Duration, objs: Class[_]*): T =
tp.expectMsgAnyClassOf(max.asScala, objs: _*).asInstanceOf[T]
tp.expectMsgAnyClassOf(max.toScala, objs: _*).asInstanceOf[T]
/**
* Assert that no message is received. Waits for the default period configured as
@ -422,7 +422,7 @@ class TestKit(system: ActorSystem) {
* Assert that no message is received for the specified time.
* Supplied value is not dilated.
*/
def expectNoMessage(max: java.time.Duration): Unit = tp.expectNoMessage(max.asScala)
def expectNoMessage(max: java.time.Duration): Unit = tp.expectNoMessage(max.toScala)
/**
* Receive one message from the test actor and assert that it is the Terminated message of the given ActorRef.
@ -433,7 +433,7 @@ class TestKit(system: ActorSystem) {
* @param target the actor ref expected to be Terminated
* @return the received Terminated message
*/
def expectTerminated(max: java.time.Duration, target: ActorRef): Terminated = tp.expectTerminated(target, max.asScala)
def expectTerminated(max: java.time.Duration, target: ActorRef): Terminated = tp.expectTerminated(target, max.toScala)
/**
* Receive one message from the test actor and assert that it is the Terminated message of the given ActorRef.
@ -454,7 +454,7 @@ class TestKit(system: ActorSystem) {
* partial function returned true
*/
def fishForMessage(max: java.time.Duration, hint: String, f: JFunction[Any, Boolean]): Any =
tp.fishForMessage(max.asScala, hint)(new CachingPartialFunction[Any, Boolean] {
tp.fishForMessage(max.toScala, hint)(new CachingPartialFunction[Any, Boolean] {
@throws(classOf[Exception])
override def `match`(x: Any): Boolean = f.apply(x)
})
@ -463,7 +463,7 @@ class TestKit(system: ActorSystem) {
* Same as `fishForMessage`, but gets a different partial function and returns properly typed message.
*/
def fishForSpecificMessage[T](max: java.time.Duration, hint: String, f: JFunction[Any, T]): T = {
tp.fishForSpecificMessage(max.asScala, hint)(new CachingPartialFunction[Any, T] {
tp.fishForSpecificMessage(max.toScala, hint)(new CachingPartialFunction[Any, T] {
@throws(classOf[Exception])
override def `match`(x: Any): T = f.apply(x)
})
@ -479,7 +479,7 @@ class TestKit(system: ActorSystem) {
/**
* Receive N messages in a row before the given deadline.
*/
def receiveN(n: Int, max: java.time.Duration): JList[AnyRef] = tp.receiveN(n, max.asScala).asJava
def receiveN(n: Int, max: java.time.Duration): JList[AnyRef] = tp.receiveN(n, max.toScala).asJava
/**
* Receive one message from the internal queue of the TestActor. If the given
@ -487,7 +487,7 @@ class TestKit(system: ActorSystem) {
*
* This method does NOT automatically scale its Duration parameter!
*/
def receiveOne(max: java.time.Duration): AnyRef = tp.receiveOne(max.asScala)
def receiveOne(max: java.time.Duration): AnyRef = tp.receiveOne(max.toScala)
/**
* Receive a series of messages until one does not match the given partial
@ -505,7 +505,7 @@ class TestKit(system: ActorSystem) {
idle: java.time.Duration,
messages: Int,
f: JFunction[AnyRef, T]): JList[T] = {
tp.receiveWhile(max.asScala, idle.asScala, messages)(new CachingPartialFunction[AnyRef, T] {
tp.receiveWhile(max.toScala, idle.toScala, messages)(new CachingPartialFunction[AnyRef, T] {
@throws(classOf[Exception])
override def `match`(x: AnyRef): T = f.apply(x)
})
@ -513,7 +513,7 @@ class TestKit(system: ActorSystem) {
}
def receiveWhile[T](max: java.time.Duration, f: JFunction[AnyRef, T]): JList[T] = {
tp.receiveWhile(max = max.asScala)(new CachingPartialFunction[AnyRef, T] {
tp.receiveWhile(max = max.toScala)(new CachingPartialFunction[AnyRef, T] {
@throws(classOf[Exception])
override def `match`(x: AnyRef): T = f.apply(x)
})