chore: scheduled link validation (#31086)

Enno Runne 2022-01-25 19:18:05 +01:00 committed by GitHub
parent 7ff3d9fbff
commit bdcb962e4c
13 changed files with 125 additions and 27 deletions

.github/workflows/link-validator.yml (new file)

@@ -0,0 +1,37 @@
name: Link Validator

on:
  schedule:
    - cron: '0 6 * * 1'
  workflow_dispatch:

jobs:
  validate-links:
    runs-on: ubuntu-20.04
    if: github.repository == 'akka/akka'
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Checkout GitHub merge
        if: github.event.pull_request
        run: |-
          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
          git checkout scratch

      - name: Set up JDK 11
        uses: olafurpg/setup-scala@v13
        with:
          java-version: adopt@1.11

      - name: Cache Coursier cache
        uses: coursier/cache-action@v6.2

      - name: create the Akka site
        run: sbt -Dakka.genjavadoc.enabled=true "Javaunidoc/doc; Compile/unidoc; akka-docs/paradox"

      - name: Install Coursier command line tool
        run: curl -fLo cs https://git.io/coursier-cli-linux && chmod +x cs && ./cs

      - name: Run Link Validator
        run: ./cs launch net.runne::site-link-validator:0.2.2 -- scripts/link-validator.conf
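For reference, the scheduled job above can also be reproduced on a local checkout. The following is a minimal sketch that simply mirrors the workflow's run steps; it assumes JDK 11, sbt, and curl are available on the PATH and that it is run from the repository root.

```
# Build the documentation site the same way the workflow does
# (Javadoc via genjavadoc, Scaladoc via unidoc, and the Paradox docs).
sbt -Dakka.genjavadoc.enabled=true "Javaunidoc/doc; Compile/unidoc; akka-docs/paradox"

# Fetch the Coursier launcher and run the link validator with the committed config.
curl -fLo cs https://git.io/coursier-cli-linux && chmod +x cs
./cs launch net.runne::site-link-validator:0.2.2 -- scripts/link-validator.conf
```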


@@ -1184,8 +1184,7 @@ object Replicator {
* </ul>
*
* For good introduction to the CRDT subject watch the
-* <a href="http://www.ustream.tv/recorded/61448875">The Final Causal Frontier</a>
-* and <a href="https://www.infoq.com/presentations/CRDT/">Eventually Consistent Data Structures</a>
+* <a href="https://www.infoq.com/presentations/CRDT/">Eventually Consistent Data Structures</a>
* talk by Sean Cribbs and the
* <a href="https://www.microsoft.com/en-us/research/video/strong-eventual-consistency-and-conflict-free-replicated-data-types/">talk by Mark Shapiro</a>
* and read the excellent paper <a href="https://hal.inria.fr/file/index/docid/555588/filename/techreport.pdf">


@@ -836,7 +836,7 @@ That has benefits such as:
The `Receive` can be implemented in other ways than using the `ReceiveBuilder` since in the
end, it is just a wrapper around a Scala `PartialFunction`. In Java, you can implement `PartialFunction` by
extending `AbstractPartialFunction`. For example, one could implement an adapter
-to [Vavr Pattern Matching DSL](https://www.vavr.io/vavr-docs/#_pattern_matching). See the [Akka Vavr sample project](https://github.com/akka/akka-samples/tree/2.5/akka-sample-vavr) for more details.
+to [Vavr Pattern Matching DSL](https://docs.vavr.io/#_pattern_matching). See the [Akka Vavr sample project](https://github.com/akka/akka-samples/tree/2.5/akka-sample-vavr) for more details.
If the validation of the `ReceiveBuilder` match logic turns out to be a bottleneck for some of your
actors you can consider implementing it at a lower level by extending `UntypedAbstractActor` instead


@@ -383,7 +383,7 @@ There are several management tools for the cluster. Please refer to the
@@@ warning
**Deprecation warning** - The command line script has been deprecated and is scheduled for removal
-in the next major version. Use the @ref:[HTTP management](additional/operations.md#http) API with [curl](https://curl.haxx.se/)
+in the next major version. Use the @ref:[HTTP management](additional/operations.md#http) API with [curl](https://curl.se/)
or similar instead.
@@@


@@ -344,8 +344,8 @@ stdout logger is `WARNING` and it can be silenced completely by setting
## SLF4J
-Akka provides a logger for [SLF4J](http://www.slf4j.org/). This module is available in the 'akka-slf4j.jar'.
-It has a single dependency: the slf4j-api jar. In your runtime, you also need a SLF4J backend. We recommend [Logback](http://logback.qos.ch/):
+Akka provides a logger for [SLF4J](https://www.slf4j.org/). This module is available in the 'akka-slf4j.jar'.
+It has a single dependency: the slf4j-api jar. In your runtime, you also need a SLF4J backend. We recommend [Logback](https://logback.qos.ch/):
@@dependency[sbt,Maven,Gradle] {
bomGroup=com.typesafe.akka bomArtifact=akka-bom_$scala.binary.version$ bomVersionSymbols=AkkaVersion
@@ -419,7 +419,7 @@ If you use the SLF4J API directly in your application, remember that the logging
while the underlying infrastructure writes the log statements.
This can be avoided by configuring the logging implementation to use
-a non-blocking appender. Logback provides [AsyncAppender](http://logback.qos.ch/manual/appenders.html#AsyncAppender)
+a non-blocking appender. Logback provides [AsyncAppender](https://logback.qos.ch/manual/appenders.html#AsyncAppender)
that does this.
### Logback configuration
@@ -427,7 +427,7 @@ that does this.
Logback has flexible configuration options and details can be found in the
[Logback manual](https://logback.qos.ch/manual/configuration.html) and other external resources.
-One part that is important to highlight is the importance of configuring an [AsyncAppender](http://logback.qos.ch/manual/appenders.html#AsyncAppender),
+One part that is important to highlight is the importance of configuring an [AsyncAppender](https://logback.qos.ch/manual/appenders.html#AsyncAppender),
because it offloads rendering of logging events to a background thread, increasing performance. It doesn't block
the threads of the `ActorSystem` while the underlying infrastructure writes the log messages to disk or other configured
destination. It also contains a feature which will drop `INFO` and `DEBUG` messages if the logging
@@ -446,7 +446,7 @@ Place the `logback.xml` file in `src/main/resources/logback.xml`. For tests you
logging configuration in `src/test/resources/logback-test.xml`.
MDC properties can be included in the Logback output with for example `%X{akkaSource}` specifier within the
-[pattern layout configuration](http://logback.qos.ch/manual/layouts.html#mdc):
+[pattern layout configuration](https://logback.qos.ch/manual/layouts.html#mdc):
```
<encoder>
@@ -496,7 +496,7 @@ If you want to more accurately output the timestamp, use the MDC attribute `akka
### MDC values defined by the application
-One useful feature available in Slf4j is [MDC](http://logback.qos.ch/manual/mdc.html),
+One useful feature available in Slf4j is [MDC](https://logback.qos.ch/manual/mdc.html),
Akka has a way to let the application specify custom values, for this you need to use a
specialized `LoggingAdapter`, the `DiagnosticLoggingAdapter`. In order to
get it you can use the factory, providing an @scala[Actor] @java[AbstractActor] as logSource:


@@ -603,7 +603,7 @@ Java
The ConsistentHashingPool uses [consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing)
to select a routee based on the sent message. This
-[article](https://www.tom-e-white.com/2007/11/consistent-hashing.html) gives good
+[article](https://tom-e-white.com/2007/11/consistent-hashing.html) gives good
insight into how consistent hashing is implemented.
There are 3 ways to define what data to use for the consistent hash key.
@@ -755,13 +755,6 @@ continue to process its messages as normal, eventually processing the `PoisonPil
cause the routee to stop. After all routees have stopped the router will itself be stopped
automatically unless it is a dynamic router, e.g. using a resizer.
-@@@ note
-Brendan W McAdams' excellent blog post [Distributing Akka Workloads - And Shutting Down Afterwards](http://bytes.codes/2013/01/17/Distributing_Akka_Workloads_And_Shutting_Down_After/)
-discusses in more detail how `PoisonPill` messages can be used to shut down routers and routees.
-@@@
### Kill Messages
`Kill` messages are another type of message that has special handling. See


@@ -65,7 +65,7 @@ It reduces the need for some annotations.
## Security
For security reasons it is disallowed to bind the Jackson serializers to
-open ended types that might be a target for [serialization gadgets](https://medium.com/@cowtowncoder/on-jackson-cves-dont-panic-here-is-what-you-need-to-know-54cd0d6e8062),
+open ended types that might be a target for [serialization gadgets](https://cowtowncoder.medium.com/on-jackson-cves-dont-panic-here-is-what-you-need-to-know-54cd0d6e8062),
such as:
* @javadoc[java.lang.Object](java.lang.Object)


@@ -1,7 +1,7 @@
# Split Brain Resolver
When operating an Akka cluster you must consider how to handle
-[network partitions](http://en.wikipedia.org/wiki/Network_partition) (a.k.a. split brain scenarios)
+[network partitions](https://en.wikipedia.org/wiki/Network_partition) (a.k.a. split brain scenarios)
and machine crashes (including JVM and hardware failures). This is crucial for correct behavior if
you use @ref:[Cluster Singleton](typed/cluster-singleton.md) or @ref:[Cluster Sharding](typed/cluster-sharding.md),
especially together with Akka Persistence.


@@ -760,7 +760,7 @@ akka {
## Different Testing Frameworks
-Akkas test suite is written using [ScalaTest](http://www.scalatest.org),
+Akkas test suite is written using [ScalaTest](https://www.scalatest.org),
which also shines through in documentation examples. However, the TestKit and
its facilities do not depend on that framework, so you can essentially use
whichever suits your development style best.


@@ -31,7 +31,7 @@ and membership state transitions.
### Gossip
The cluster membership used in Akka is based on Amazon's [Dynamo](https://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf) system and
-particularly the approach taken in Basho's' [Riak](https://riak.com/technology/architecture/) distributed database.
+particularly the approach taken in Basho's' [Riak](https://en.wikipedia.org/wiki/Riak) distributed database.
Cluster membership is communicated using a [Gossip Protocol](https://en.wikipedia.org/wiki/Gossip_protocol), where the current
state of the cluster is gossiped randomly through the cluster, with preference to
members that have not seen the latest version.


@@ -427,7 +427,7 @@ where multiple destination actors can perform the same piece of work, and where
more slowly than expected. In this case, sending the same work request (also known as a "backup request")
to another actor results in decreased response time - because it's less probable that multiple actors
are under heavy load simultaneously. This technique is explained in depth in Jeff Dean's presentation on
-[Achieving Rapid Response Times in Large Online Services](http://static.googleusercontent.com/media/research.google.com/en//people/jeff/Berkeley-Latency-Mar2012.pdf).
+[Achieving Rapid Response Times in Large Online Services](https://static.googleusercontent.com/media/research.google.com/en//people/jeff/Berkeley-Latency-Mar2012.pdf).
There are many variations of this pattern and that is the reason this is provided as a documentation
example rather than a built in `Behavior` in Akka. It is intended to be adjusted to your specific needs.


@@ -129,7 +129,7 @@ Java
## MDC
-[MDC](http://logback.qos.ch/manual/mdc.html) allows for adding additional context dependent attributes to log entries.
+[MDC](https://logback.qos.ch/manual/mdc.html) allows for adding additional context dependent attributes to log entries.
Out of the box, Akka will place the path of the actor in the MDC attribute `akkaSource`.
One or more tags can also be added to the MDC using the @apidoc[ActorTags$] props. The tags will be rendered as a comma separated
@@ -178,7 +178,7 @@ that are running actors and other tasks.
### Logback
`akka-actor-typed` includes a dependency to the `slf4j-api`. In your runtime, you also need a SLF4J backend.
-We recommend [Logback](http://logback.qos.ch/):
+We recommend [Logback](https://logback.qos.ch/):
@@dependency[sbt,Maven,Gradle] {
group="ch.qos.logback"
@@ -189,7 +189,7 @@ We recommend [Logback](http://logback.qos.ch/):
Logback has flexible configuration options and details can be found in the
[Logback manual](https://logback.qos.ch/manual/configuration.html) and other external resources.
-One part that is important to highlight is the importance of configuring an [AsyncAppender](http://logback.qos.ch/manual/appenders.html#AsyncAppender),
+One part that is important to highlight is the importance of configuring an [AsyncAppender](https://logback.qos.ch/manual/appenders.html#AsyncAppender),
because it offloads rendering of logging events to a background thread, increasing performance. It doesn't block
the threads of the @apidoc[typed.ActorSystem] while the underlying infrastructure writes the log messages to disk or other configured
destination. It also contains a feature which will drop `INFO` and `DEBUG` messages if the logging
@@ -232,7 +232,7 @@ When logging via the @scala[@scaladoc[log](akka.actor.typed.scaladsl.ActorConte
* `sourceActorSystem`: the name of the ActorSystem
These MDC properties can be included in the Logback output with for example `%X{akkaSource}` specifier within the
-[pattern layout configuration](http://logback.qos.ch/manual/layouts.html#mdc):
+[pattern layout configuration](https://logback.qos.ch/manual/layouts.html#mdc):
```
<encoder>

scripts/link-validator.conf (new file)

@@ -0,0 +1,69 @@
// config for https://github.com/ennru/site-link-validator/
site-link-validator {
  root-dir = "akka-docs/target/paradox/site/main/"
  # relative to `root-dir`
  start-file = "index.html"

  # Resolves URLs with the given prefix as local files instead
  link-mappings = [
    {
      prefix = "https://doc.akka.io/docs/akka/current/"
      replace = ""
    }
    # ScalaDoc from unidoc
    {
      prefix = "https://doc.akka.io/api/akka/2.6/"
      replace = "/../../../../../target/scala-2.13/unidoc/"
    }
    {
      prefix = "https://doc.akka.io/api/akka/current/"
      replace = "/../../../../../target/scala-2.13/unidoc/"
    }
    {
      prefix = "https://doc.akka.io/api/akka/snapshot/"
      replace = "/../../../../../target/scala-2.13/unidoc/"
    }
    # Java APIs from genjavadoc
    {
      prefix = "https://doc.akka.io/japi/akka/2.6/"
      replace = "/../../../../../target/javaunidoc/"
    }
    {
      prefix = "https://doc.akka.io/japi/akka/current/"
      replace = "/../../../../../target/javaunidoc/"
    }
    {
      prefix = "https://doc.akka.io/japi/akka/snapshot/"
      replace = "/../../../../../target/javaunidoc/"
    }
  ]

  ignore-missing-local-files-regex = ""
  // e.g. "^api/alpakka/snapshot/akka/stream/alpakka/googlecloud/storage/impl/Formats.*"

  ignore-prefixes = [
    # Fails after a number of requests with "403 Forbidden"
    "https://javadoc.io/static/"
    # GitHub will block with "429 Too Many Requests"
    "https://github.com/"
    "https://www.scala-lang.org/api/2.13.7/scala/runtime/AbstractFunction1.html"
    "https://www.scala-lang.org/api/2.13.7/scala/runtime/AbstractFunction2.html"
    "https://www.scala-lang.org/api/2.13.7/scala/runtime/AbstractFunction3.html"
    "https://www.scala-lang.org/api/2.13.7/scala/runtime/AbstractPartialFunction.html"
  ]

  non-https-whitelist = [
    "http://cidrdb.org/cidr2015/Papers/CIDR15_Paper16.pdf"
    "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.7628"
    "http://spray.io"
    "http://www.bailis.org/blog/doing-redundant-work-to-speed-up-distributed-queries/"
    "http://www.cs.columbia.edu/~nahum/w6998/papers/sosp87-timing-wheels.pdf"
    # document not available anymore
    "http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf"
    # Sigar site has been removed
    "http://www.hyperic.com/products/sigar"
    "http://www.tom-e-white.com/2007/11/consistent-hashing.html"
    # generated by @apidoc
    "http://www.scala-lang.org/api/2.13."
  ]
}