Use GHExcludeTest in CI again (#30941)

* Use -XX:+AlwaysActAsServerClassMachine in CI
* Remove GHExcludeTest from all tests, since those are now included in the CI workflows
* Exclude a few failing tests from CI
* Run CI with -Dakka.test.tags.exclude=gh-exclude again (the tag mechanism is sketched below)
Patrik Nordwall 2021-11-28 19:52:16 +01:00 committed by GitHub
parent cef765a298
commit 2909d4cce5
20 changed files with 51 additions and 46 deletions
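
For reference, the tag-based exclusion re-enabled here works roughly as follows: GHExcludeTest in akka.testkit is a ScalaTest Tag, and the -Dakka.test.tags.exclude=gh-exclude property passed to sbt in the workflows below is mapped to ScalaTest's -l (exclude tags) option. The following is a minimal sketch of that mechanism, not the exact akka-testkit source; the tag name "gh-exclude" is inferred from the property value, and ExampleSpec is a made-up spec for illustration only.

import org.scalatest.Tag
import org.scalatest.wordspec.AnyWordSpec

// Sketch: a ScalaTest Tag whose name matches the value given to
// -Dakka.test.tags.exclude (the real GHExcludeTest lives in akka.testkit).
object GHExcludeTest extends Tag("gh-exclude")

// Hypothetical spec showing how a test known to be flaky on GitHub Actions
// opts into the exclusion.
class ExampleSpec extends AnyWordSpec {
  "Something flaky on GitHub Actions" must {
    // Skipped when the build turns akka.test.tags.exclude=gh-exclude into
    // ScalaTest's "-l gh-exclude" argument; runs everywhere else.
    "be excluded from CI runs that set the gh-exclude filter" taggedAs GHExcludeTest in {
      assert(1 + 1 == 2)
    }
  }
}

Assuming the build reads that property the same way the workflow invocations below do, running the same filtered test set locally would look like: sbt -Dakka.test.tags.exclude=gh-exclude test.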

@@ -60,6 +60,7 @@ jobs:
-Dakka.test.multi-in-test=false \
-Dakka.test.timefactor=2 \
-Dakka.actor.testkit.typed.timefactor=2 \
-Dakka.test.tags.exclude=gh-exclude \
-Dakka.cluster.assert=on \
-Dsbt.override.build.repos=false \
-Dakka.test.multi-node=false \

@@ -53,6 +53,7 @@ jobs:
sbt -jvm-opts .jvmopts-ci \
-Dakka.test.timefactor=2 \
-Dakka.actor.testkit.typed.timefactor=2 \
-Dakka.test.tags.exclude=gh-exclude \
-Dakka.cluster.assert=on \
-Dsbt.override.build.repos=false \
-Dakka.test.multi-node=true \
@@ -62,6 +63,7 @@ jobs:
-Dmultinode.Xms512M \
-Dmultinode.Xmx512M \
-Dmultinode.Xlog:gc \
-Dmultinode.XX:+AlwaysActAsServerClassMachine \
-Daeron.dir=/opt/volumes/media-driver \
-Dmultinode.Daeron.dir=/opt/volumes/media-driver \
-Daeron.term.buffer.length=33554432 \
@@ -134,6 +136,7 @@ jobs:
sbt -jvm-opts .jvmopts-ci \
-Dakka.test.timefactor=2 \
-Dakka.actor.testkit.typed.timefactor=2 \
-Dakka.test.tags.exclude=gh-exclude \
-Dakka.cluster.assert=on \
-Dakka.remote.artery.transport=aeron-udp \
-Dsbt.override.build.repos=false \
@@ -145,6 +148,7 @@ jobs:
-Dmultinode.Xms512M \
-Dmultinode.Xmx512M \
-Dmultinode.Xlog:gc \
-Dmultinode.XX:+AlwaysActAsServerClassMachine \
-Daeron.dir=/opt/volumes/media-driver \
-Dmultinode.Daeron.dir=/opt/volumes/media-driver \
-Daeron.term.buffer.length=33554432 \

@@ -39,6 +39,7 @@ jobs:
-Dmultinode.Xms256M \
-Dmultinode.Xmx256M \
-Dmultinode.Xlog:gc \
-Dmultinode.XX:+AlwaysActAsServerClassMachine \
clean akka-cluster-metrics/test
- name: Test Reports
@@ -98,13 +99,14 @@ jobs:
-Dakka.remote.artery.enabled=off \
-Dakka.test.timefactor=2 \
-Dakka.actor.testkit.typed.timefactor=2 \
-Dakka.cluster.assert=on \
-Dakka.test.tags.exclude=gh-exclude \
-Dakka.cluster.assert=on \
-Dakka.test.names.exclude=akka.cluster.Stress \
-Dmultinode.XX:MetaspaceSize=128M \
-Dmultinode.Xms256M \
-Dmultinode.Xmx256M \
-Dmultinode.Xlog:gc \
-Dmultinode.XX:+AlwaysActAsServerClassMachine \
clean ${{ matrix.command }}
jdk-nightly-build:
@@ -147,10 +149,12 @@ jobs:
-Dakka.log.timestamps=true \
-Dakka.test.timefactor=2 \
-Dakka.actor.testkit.typed.timefactor=2 \
-Dakka.test.tags.exclude=gh-exclude \
-Dakka.test.multi-in-test=false \
-Dmultinode.XX:MetaspaceSize=128M \
-Dmultinode.Xms256M \
-Dmultinode.Xmx256M \
-Dmultinode.XX:+AlwaysActAsServerClassMachine \
${{ matrix.extraOpts }} \
clean Test/compile test checkTestsHaveRun

@@ -58,4 +58,5 @@ jobs:
-Dmultinode.Xms256M \
-Dmultinode.Xmx256M \
-Dmultinode.Xlog:gc \
-Dmultinode.XX:+AlwaysActAsServerClassMachine \
${{ matrix.command }}

@@ -4,6 +4,7 @@
-Xms3G
-Xmx3G
-Xss2M
-XX:+AlwaysActAsServerClassMachine
-XX:ReservedCodeCacheSize=256m
-XX:MaxGCPauseMillis=750
-XX:-UseBiasedLocking

@@ -1,3 +1,4 @@
-J-Xmx3072M
-J-Xms1024M
-Dmultinode.XX:MetaspaceSize=128M
-Dmultinode.XX:MetaspaceSize=128M
-J-XX:+AlwaysActAsServerClassMachine

@@ -82,8 +82,7 @@ class CircuitBreakerMTSpec extends AkkaSpec {
result.toSet should ===(Set("succeed", "CBO"))
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30476
"recover and reset the breaker after the reset timeout" taggedAs GHExcludeTest in {
"recover and reset the breaker after the reset timeout" in {
val halfOpenLatch = new TestLatch(1)
breaker.onHalfOpen(halfOpenLatch.countDown())
openBreaker()

@@ -12,7 +12,6 @@ import scala.concurrent.duration._
import akka.actor.{ Actor, ActorLogging, ActorRef, PoisonPill, Props }
import akka.testkit.AkkaSpec
import akka.testkit.GHExcludeTest
import akka.testkit.ImplicitSender
import akka.testkit.TestLatch
import org.scalatest.BeforeAndAfterEach
@@ -135,7 +134,7 @@ class BalancingSpec extends AkkaSpec("""
test(pool, startOthers, latch)
}
"deliver messages in a balancing fashion when defined in config" taggedAs GHExcludeTest in {
"deliver messages in a balancing fashion when defined in config" in {
val latch = TestLatch(poolSize)
val startOthers = Promise[Unit]()
val pool =
@@ -145,7 +144,7 @@ class BalancingSpec extends AkkaSpec("""
test(pool, startOthers, latch)
}
"deliver messages in a balancing fashion when overridden in config" taggedAs GHExcludeTest in {
"deliver messages in a balancing fashion when overridden in config" in {
val latch = TestLatch(poolSize)
val startOthers = Promise[Unit]()
val pool =

@@ -21,7 +21,7 @@ import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.scalatest.wordspec.AnyWordSpec
import akka.testkit.{ GHExcludeTest, TimingTest }
import akka.testkit.TimingTest
import akka.util.DefaultExecutionContext._
import akka.util.ccompat.JavaConverters._
@@ -284,8 +284,7 @@ class BoundedBlockingQueueSpec
(events should contain).inOrder(awaitNotFull, signalNotFull, offer("World"))
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30479
"check the backing queue size before offering" taggedAs GHExcludeTest in {
"check the backing queue size before offering" in {
val TestContext(queue, events, _, notFull, lock, _) = newBoundedBlockingQueue(1)
queue.put("Hello")
// Blocks until another thread signals `notFull`

@@ -26,7 +26,8 @@ import akka.routing.FromConfig
import akka.routing.GetRoutees
import akka.routing.Routees
import akka.serialization.jackson.CborSerializable
import akka.testkit.{ DefaultTimeout, GHExcludeTest, ImplicitSender, LongRunningTest }
import akka.testkit.GHExcludeTest
import akka.testkit.{ DefaultTimeout, ImplicitSender, LongRunningTest }
import akka.util.unused
object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig {
@@ -171,8 +172,7 @@ abstract class AdaptiveLoadBalancingRouterSpec
enterBarrier("after-1")
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"use all nodes in the cluster when not overloaded" taggedAs (LongRunningTest, GHExcludeTest) in {
"use all nodes in the cluster when not overloaded" taggedAs LongRunningTest in {
runOn(node1) {
val router1 = startRouter("router1")

@@ -177,8 +177,7 @@ abstract class ClusterShardingRememberEntitiesPerfSpec
enterBarrier(s"after-start-stop-${testRun}")
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"test when starting new entity" taggedAs GHExcludeTest in {
"test when starting new entity" in {
val numberOfMessages = 200 * NrOfMessagesFactor
runBench("start new entities") { (iteration, region, histogram) =>
(1 to numberOfMessages).foreach { n =>
@@ -191,8 +190,7 @@ abstract class ClusterShardingRememberEntitiesPerfSpec
}
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"test latency when starting new entity and sending a few messages" taggedAs GHExcludeTest in {
"test latency when starting new entity and sending a few messages" in {
val numberOfMessages = 800 * NrOfMessagesFactor
runBench("start, few messages") { (iteration, region, histogram) =>
for (n <- 1 to numberOfMessages / 5; _ <- 1 to 5) {
@@ -205,8 +203,7 @@ abstract class ClusterShardingRememberEntitiesPerfSpec
}
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"test latency when starting new entity and sending a few messages to it and stopping" taggedAs GHExcludeTest in {
"test latency when starting new entity and sending a few messages to it and stopping" in {
val numberOfMessages = 800 * NrOfMessagesFactor
// 160 entities, and an extra one for the initialization
// all but the first one are not removed
@@ -240,8 +237,7 @@ abstract class ClusterShardingRememberEntitiesPerfSpec
}
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"test latency when starting, few messages, stopping, few messages" taggedAs GHExcludeTest in {
"test latency when starting, few messages, stopping, few messages" in {
val numberOfMessages = 800 * NrOfMessagesFactor
runBench("start, few messages, stop, few messages") { (iteration, region, histogram) =>
for (n <- 1 to numberOfMessages / 5; m <- 1 to 5) {
@@ -264,8 +260,7 @@ abstract class ClusterShardingRememberEntitiesPerfSpec
}
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"test when starting some new entities mixed with sending to started" taggedAs GHExcludeTest in {
"test when starting some new entities mixed with sending to started" in {
runBench("starting mixed with sending to started") { (iteration, region, histogram) =>
val numberOfMessages = 1600 * NrOfMessagesFactor
(1 to numberOfMessages).foreach { n =>
@@ -289,8 +284,7 @@ abstract class ClusterShardingRememberEntitiesPerfSpec
}
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"test sending to started" taggedAs GHExcludeTest in {
"test sending to started" in {
runBench("sending to started") { (iteration, region, histogram) =>
val numberOfMessages = 1600 * NrOfMessagesFactor
(1 to numberOfMessages).foreach { n =>

@@ -21,7 +21,6 @@ import akka.cluster.typed.PrepareForFullClusterShutdown
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.serialization.jackson.CborSerializable
import akka.testkit.GHExcludeTest
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
@@ -79,8 +78,7 @@ class ClusterShardingPreparingForShutdownSpec
formCluster(first, second, third)
}
// Excluded on GH Actions: https://github.com/akka/akka/issues/30486
"not rebalance but should still work preparing for shutdown" taggedAs GHExcludeTest in {
"not rebalance but should still work preparing for shutdown" in {
val shardRegion: ActorRef[ShardingEnvelope[Command]] =
sharding.init(Entity(typeKey)(_ => Pinger()))

@@ -29,7 +29,6 @@ import akka.cluster.typed.Join
import akka.persistence.journal.inmem.InmemJournal
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.delivery.EventSourcedProducerQueue
import akka.testkit.GHExcludeTest
object DurableShardingSpec {
def conf: Config =
@@ -81,8 +80,7 @@ class DurableShardingSpec
Cluster(system).manager ! Join(Cluster(system).selfMember.address)
}
// GHExclude tracked in https://github.com/akka/akka/issues/30489
"load initial state and resend unconfirmed" taggedAs GHExcludeTest in {
"load initial state and resend unconfirmed" in {
pending // FIXME issue #30489, this could be a real problem
nextId()
val typeKey = EntityTypeKey[SequencedMessage[TestConsumer.Job]](s"TestConsumer-$idCount")

@@ -29,6 +29,7 @@ import akka.cluster.sharding.typed.scaladsl.Entity
import akka.cluster.sharding.typed.scaladsl.EntityTypeKey
import akka.cluster.typed.Cluster
import akka.cluster.typed.Join
import akka.testkit.GHExcludeTest
object ReliableDeliveryShardingSpec {
val config = ConfigFactory.parseString("""
@@ -335,7 +336,8 @@ class ReliableDeliveryShardingSpec
testKit.stop(shardingProducerController)
}
"deliver unconfirmed if ShardingConsumerController is terminated" in {
// FIXME issue https://github.com/akka/akka/issues/30567
"deliver unconfirmed if ShardingConsumerController is terminated" taggedAs GHExcludeTest in {
// for example if ShardingConsumerController is rebalanced, but no more messages are sent to the entity
nextId()

@@ -20,6 +20,8 @@ import akka.testkit.WithLogCapturing
import com.typesafe.config.ConfigFactory
import org.scalatest.wordspec.AnyWordSpecLike
import akka.testkit.GHExcludeTest
object PersistentStartEntitySpec {
class EntityActor extends Actor {
override def receive: Receive = {
@@ -71,7 +73,8 @@ class PersistentStartEntitySpec
"Persistent Shard" must {
"remember entities started with StartEntity" in {
// FIXME https://github.com/akka/akka/issues/30393
"remember entities started with StartEntity" taggedAs GHExcludeTest in {
val sharding = ClusterSharding(system).start(
s"startEntity",
Props[EntityActor](),

@@ -15,8 +15,6 @@ import akka.testkit.WithLogCapturing
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import akka.testkit.GHExcludeTest
/**
* Covers some corner cases around sending triggering an entity with StartEntity
*/
@@ -119,7 +117,7 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend
// entity crashed and before restart-backoff hit we sent it a StartEntity
"StartEntity while the entity is waiting for restart" should {
"restart it immediately" taggedAs GHExcludeTest in {
"restart it immediately" in {
val sharding = ClusterSharding(system).start(
"start-entity-2",
EntityActor.props(),

@@ -9,7 +9,7 @@ import scala.concurrent.duration._
import com.typesafe.config.{ Config, ConfigFactory }
import akka.testkit.LongRunningTest
import akka.testkit.GHExcludeTest
object JoinConfigCompatCheckerRollingUpdateSpec {
@@ -47,16 +47,17 @@ class JoinConfigCompatCheckerRollingUpdateSpec
import JoinConfigCompatCheckerRollingUpdateSpec._
// FIXME https://github.com/akka/akka/issues/30939 (tag as LongRunningTest instead when fixed)
"A Node" must {
val timeout = 20.seconds
"NOT be allowed to re-join a cluster if it has a new, additional configuration the others do not have and not the old" taggedAs LongRunningTest in {
"NOT be allowed to re-join a cluster if it has a new, additional configuration the others do not have and not the old" taggedAs GHExcludeTest in {
// confirms the 2 attempted re-joins fail with both nodes being terminated
upgradeCluster(3, v1Config, v2ConfigIncompatible, timeout, timeout, enforced = true, shouldRejoin = false)
}
"be allowed to re-join a cluster if it has a new, additional property and checker the others do not have" taggedAs LongRunningTest in {
"be allowed to re-join a cluster if it has a new, additional property and checker the others do not have" taggedAs GHExcludeTest in {
upgradeCluster(3, v1Config, v2Config, timeout, timeout * 3, enforced = true, shouldRejoin = true)
}
"be allowed to re-join a cluster if it has a new, additional configuration the others do not have and configured to NOT enforce it" taggedAs LongRunningTest in {
"be allowed to re-join a cluster if it has a new, additional configuration the others do not have and configured to NOT enforce it" taggedAs GHExcludeTest in {
upgradeCluster(3, v1Config, v2Config, timeout, timeout * 3, enforced = false, shouldRejoin = true)
}
}

@@ -9,6 +9,7 @@ import scala.concurrent.duration._
import com.typesafe.config.{ Config, ConfigFactory }
import akka.testkit.GHExcludeTest
import akka.testkit.{ AkkaSpec, LongRunningTest }
object JoinConfigCompatCheckerSpec {
@@ -363,7 +364,8 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit {
}
"NOT be allowed to re-join a cluster when one of its required properties are not available on cluster side" taggedAs LongRunningTest in {
// FIXME https://github.com/akka/akka/issues/30843 (tag as LongRunningTest instead when fixed)
"NOT be allowed to re-join a cluster when one of its required properties are not available on cluster side" taggedAs GHExcludeTest in {
// this config is NOT compatible with the cluster config
// because there is one missing required configuration property.

@@ -33,7 +33,6 @@ import akka.stream.testkit.scaladsl.TestSink
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.DefaultTimeout
import akka.testkit.EventFilter
import akka.testkit.GHExcludeTest
import akka.testkit.TestDuration
class RestartSpec
@@ -319,8 +318,7 @@ class RestartSpec
probe.cancel()
}
// https://github.com/akka/akka/issues/30540
"allow using withMaxRestarts instead of minBackoff to determine the maxRestarts reset time" taggedAs GHExcludeTest in assertAllStagesStopped {
"allow using withMaxRestarts instead of minBackoff to determine the maxRestarts reset time" in assertAllStagesStopped {
val created = new AtomicInteger()
val probe = RestartSource
.withBackoff(shortRestartSettings.withMaxRestarts(2, 1.second)) { () =>
@@ -562,7 +560,7 @@ class RestartSpec
probe.sendComplete()
}
"allow using withMaxRestarts instead of minBackoff to determine the maxRestarts reset time" taggedAs GHExcludeTest in assertAllStagesStopped {
"allow using withMaxRestarts instead of minBackoff to determine the maxRestarts reset time" in assertAllStagesStopped {
val created = new AtomicInteger()
val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run()
val probe = TestSource

@@ -21,6 +21,7 @@ import akka.stream.impl.streamref.{ SinkRefImpl, SourceRefImpl }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.Utils.TE
import akka.stream.testkit.scaladsl._
import akka.testkit.GHExcludeTest
import akka.testkit.{ AkkaSpec, TestKit, TestProbe }
import akka.util.ByteString
@@ -348,7 +349,8 @@ class StreamRefsSpec extends AkkaSpec(StreamRefsSpec.config()) {
remoteProbe.expectMsg(Done)
}
"pass cancellation upstream across remoting before elements has been emitted" in {
// FIXME https://github.com/akka/akka/issues/30844
"pass cancellation upstream across remoting before elements has been emitted" taggedAs GHExcludeTest in {
val remoteProbe = TestProbe()(remoteSystem)
remoteActor.tell("give-nothing-watch", remoteProbe.ref)
val sourceRef = remoteProbe.expectMsgType[SourceRef[String]]