+str #24229 move SinkRef / SourceRef to akka.stream

+str #24229 remove protobuf changes, which do not need to be made in this PR

docs

moved things

config object

subscription timeout confirmed working; also, attributes

document attributes for sub timeout

tests for the source also failing when it should

additional demand test

implemented protection from materializing "in cycles"; would be nice in
types but that breaks the niceness of use of the types
SinkRef/SourceRef...

cleanup

no idle timeout built in, can use the Timeout stages

more docs

simplest change to prevent exposing SinkRef => SourceRef => SinkRef cycle

Things to decide:
 * is it ok to require using `getSource` / `getSink` as Java API, is there better naming?
 * where should the constructors go? I'd say just in regular javadsl/scaladsl `Source`/ `Sink` objects

move constructors to {javadsl,scaladsl}.{Source,Sink} companion objects

Remove now useless "canMaterialize" field

Separate stage (implementation) from ref (wrapped actor ref) to make it clearer what is serialized

Clarify that partner refs are not optional in on-the-wire interfaces

minor cleanup in SourceRefStage

Renamed the stages but questionable if that really helps ;)

cleanups, better docs

cleanup, fix docs compilation

fix mima

got rid of Futures in the materialized values of stream refs
This commit is contained in:
Konrad `ktoso` Malawski 2018-01-04 17:21:47 +01:00
parent bc6861f7e4
commit 7c75abbf7e
42 changed files with 2834 additions and 4386 deletions

View file

@ -9,12 +9,10 @@ import scala.util.control.NonFatal
import scala.collection.immutable
import akka.actor._
import akka.serialization.SerializationExtension
import akka.util.{ Helpers, Unsafe }
import akka.util.{ Unsafe, Helpers }
import akka.serialization.SerializerWithStringManifest
import java.util.Optional
import akka.event.Logging
private[akka] object Children {
val GetNobody = () Nobody
}
@ -194,8 +192,7 @@ private[akka] trait Children { this: ActorCell ⇒
protected def getAllChildStats: immutable.Iterable[ChildRestartStats] = childrenRefs.stats
override def getSingleChild(name: String): InternalActorRef = {
override def getSingleChild(name: String): InternalActorRef =
if (name.indexOf('#') == -1) {
// optimization for the non-uid case
getChildByName(name) match {
@ -210,7 +207,6 @@ private[akka] trait Children { this: ActorCell ⇒
case _ getFunctionRefOrNobody(childName, uid)
}
}
}
protected def removeChildAndGetStateChange(child: ActorRef): Option[SuspendReason] = {
@tailrec def removeChild(ref: ActorRef): ChildrenContainer = {

View file

@ -72,6 +72,7 @@ trait PipeToSupport {
*
* {{{
* import akka.pattern.pipe
* // requires implicit ExecutionContext, e.g. by importing `context.dispatcher` inside an Actor
*
* Future { doExpensiveCalc() } pipeTo nextActor
*
@ -91,6 +92,7 @@ trait PipeToSupport {
*
* {{{
* import akka.pattern.pipe
* // requires implicit ExecutionContext, e.g. by importing `context.dispatcher` inside an Actor
*
* Future { doExpensiveCalc() } pipeTo nextActor
*

View file

@ -1,823 +0,0 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: FlightAppModels.proto
package docs.persistence.proto;
public final class FlightAppModels {
private FlightAppModels() {}
public static void registerAllExtensions(
akka.protobuf.ExtensionRegistry registry) {
}
public interface SeatReservedOrBuilder
extends akka.protobuf.MessageOrBuilder {
// required string letter = 1;
/**
* <code>required string letter = 1;</code>
*/
boolean hasLetter();
/**
* <code>required string letter = 1;</code>
*/
java.lang.String getLetter();
/**
* <code>required string letter = 1;</code>
*/
akka.protobuf.ByteString
getLetterBytes();
// required uint32 row = 2;
/**
* <code>required uint32 row = 2;</code>
*/
boolean hasRow();
/**
* <code>required uint32 row = 2;</code>
*/
int getRow();
// optional string seatType = 3;
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
boolean hasSeatType();
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
java.lang.String getSeatType();
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
akka.protobuf.ByteString
getSeatTypeBytes();
}
/**
* Protobuf type {@code docs.persistence.SeatReserved}
*/
public static final class SeatReserved extends
akka.protobuf.GeneratedMessage
implements SeatReservedOrBuilder {
// Use SeatReserved.newBuilder() to construct.
private SeatReserved(akka.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SeatReserved(boolean noInit) { this.unknownFields = akka.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SeatReserved defaultInstance;
public static SeatReserved getDefaultInstance() {
return defaultInstance;
}
public SeatReserved getDefaultInstanceForType() {
return defaultInstance;
}
private final akka.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final akka.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SeatReserved(
akka.protobuf.CodedInputStream input,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws akka.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
akka.protobuf.UnknownFieldSet.Builder unknownFields =
akka.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
letter_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
row_ = input.readUInt32();
break;
}
case 26: {
bitField0_ |= 0x00000004;
seatType_ = input.readBytes();
break;
}
}
}
} catch (akka.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new akka.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final akka.protobuf.Descriptors.Descriptor
getDescriptor() {
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_descriptor;
}
protected akka.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_fieldAccessorTable
.ensureFieldAccessorsInitialized(
docs.persistence.proto.FlightAppModels.SeatReserved.class, docs.persistence.proto.FlightAppModels.SeatReserved.Builder.class);
}
public static akka.protobuf.Parser<SeatReserved> PARSER =
new akka.protobuf.AbstractParser<SeatReserved>() {
public SeatReserved parsePartialFrom(
akka.protobuf.CodedInputStream input,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws akka.protobuf.InvalidProtocolBufferException {
return new SeatReserved(input, extensionRegistry);
}
};
@java.lang.Override
public akka.protobuf.Parser<SeatReserved> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string letter = 1;
public static final int LETTER_FIELD_NUMBER = 1;
private java.lang.Object letter_;
/**
* <code>required string letter = 1;</code>
*/
public boolean hasLetter() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required string letter = 1;</code>
*/
public java.lang.String getLetter() {
java.lang.Object ref = letter_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
akka.protobuf.ByteString bs =
(akka.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
letter_ = s;
}
return s;
}
}
/**
* <code>required string letter = 1;</code>
*/
public akka.protobuf.ByteString
getLetterBytes() {
java.lang.Object ref = letter_;
if (ref instanceof java.lang.String) {
akka.protobuf.ByteString b =
akka.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
letter_ = b;
return b;
} else {
return (akka.protobuf.ByteString) ref;
}
}
// required uint32 row = 2;
public static final int ROW_FIELD_NUMBER = 2;
private int row_;
/**
* <code>required uint32 row = 2;</code>
*/
public boolean hasRow() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint32 row = 2;</code>
*/
public int getRow() {
return row_;
}
// optional string seatType = 3;
public static final int SEATTYPE_FIELD_NUMBER = 3;
private java.lang.Object seatType_;
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public boolean hasSeatType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public java.lang.String getSeatType() {
java.lang.Object ref = seatType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
akka.protobuf.ByteString bs =
(akka.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
seatType_ = s;
}
return s;
}
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public akka.protobuf.ByteString
getSeatTypeBytes() {
java.lang.Object ref = seatType_;
if (ref instanceof java.lang.String) {
akka.protobuf.ByteString b =
akka.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
seatType_ = b;
return b;
} else {
return (akka.protobuf.ByteString) ref;
}
}
private void initFields() {
letter_ = "";
row_ = 0;
seatType_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLetter()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRow()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(akka.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getLetterBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, row_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getSeatTypeBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += akka.protobuf.CodedOutputStream
.computeBytesSize(1, getLetterBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += akka.protobuf.CodedOutputStream
.computeUInt32Size(2, row_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += akka.protobuf.CodedOutputStream
.computeBytesSize(3, getSeatTypeBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
akka.protobuf.ByteString data)
throws akka.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
akka.protobuf.ByteString data,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws akka.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(byte[] data)
throws akka.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
byte[] data,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws akka.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
java.io.InputStream input,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseDelimitedFrom(
java.io.InputStream input,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
akka.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
akka.protobuf.CodedInputStream input,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(docs.persistence.proto.FlightAppModels.SeatReserved prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
akka.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code docs.persistence.SeatReserved}
*/
public static final class Builder extends
akka.protobuf.GeneratedMessage.Builder<Builder>
implements docs.persistence.proto.FlightAppModels.SeatReservedOrBuilder {
public static final akka.protobuf.Descriptors.Descriptor
getDescriptor() {
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_descriptor;
}
protected akka.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_fieldAccessorTable
.ensureFieldAccessorsInitialized(
docs.persistence.proto.FlightAppModels.SeatReserved.class, docs.persistence.proto.FlightAppModels.SeatReserved.Builder.class);
}
// Construct using docs.persistence.proto.FlightAppModels.SeatReserved.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
akka.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
letter_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
row_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
seatType_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public akka.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_descriptor;
}
public docs.persistence.proto.FlightAppModels.SeatReserved getDefaultInstanceForType() {
return docs.persistence.proto.FlightAppModels.SeatReserved.getDefaultInstance();
}
public docs.persistence.proto.FlightAppModels.SeatReserved build() {
docs.persistence.proto.FlightAppModels.SeatReserved result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public docs.persistence.proto.FlightAppModels.SeatReserved buildPartial() {
docs.persistence.proto.FlightAppModels.SeatReserved result = new docs.persistence.proto.FlightAppModels.SeatReserved(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.letter_ = letter_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.row_ = row_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.seatType_ = seatType_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(akka.protobuf.Message other) {
if (other instanceof docs.persistence.proto.FlightAppModels.SeatReserved) {
return mergeFrom((docs.persistence.proto.FlightAppModels.SeatReserved)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(docs.persistence.proto.FlightAppModels.SeatReserved other) {
if (other == docs.persistence.proto.FlightAppModels.SeatReserved.getDefaultInstance()) return this;
if (other.hasLetter()) {
bitField0_ |= 0x00000001;
letter_ = other.letter_;
onChanged();
}
if (other.hasRow()) {
setRow(other.getRow());
}
if (other.hasSeatType()) {
bitField0_ |= 0x00000004;
seatType_ = other.seatType_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLetter()) {
return false;
}
if (!hasRow()) {
return false;
}
return true;
}
public Builder mergeFrom(
akka.protobuf.CodedInputStream input,
akka.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
docs.persistence.proto.FlightAppModels.SeatReserved parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (akka.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (docs.persistence.proto.FlightAppModels.SeatReserved) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string letter = 1;
private java.lang.Object letter_ = "";
/**
* <code>required string letter = 1;</code>
*/
public boolean hasLetter() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required string letter = 1;</code>
*/
public java.lang.String getLetter() {
java.lang.Object ref = letter_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((akka.protobuf.ByteString) ref)
.toStringUtf8();
letter_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>required string letter = 1;</code>
*/
public akka.protobuf.ByteString
getLetterBytes() {
java.lang.Object ref = letter_;
if (ref instanceof String) {
akka.protobuf.ByteString b =
akka.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
letter_ = b;
return b;
} else {
return (akka.protobuf.ByteString) ref;
}
}
/**
* <code>required string letter = 1;</code>
*/
public Builder setLetter(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
letter_ = value;
onChanged();
return this;
}
/**
* <code>required string letter = 1;</code>
*/
public Builder clearLetter() {
bitField0_ = (bitField0_ & ~0x00000001);
letter_ = getDefaultInstance().getLetter();
onChanged();
return this;
}
/**
* <code>required string letter = 1;</code>
*/
public Builder setLetterBytes(
akka.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
letter_ = value;
onChanged();
return this;
}
// required uint32 row = 2;
private int row_ ;
/**
* <code>required uint32 row = 2;</code>
*/
public boolean hasRow() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint32 row = 2;</code>
*/
public int getRow() {
return row_;
}
/**
* <code>required uint32 row = 2;</code>
*/
public Builder setRow(int value) {
bitField0_ |= 0x00000002;
row_ = value;
onChanged();
return this;
}
/**
* <code>required uint32 row = 2;</code>
*/
public Builder clearRow() {
bitField0_ = (bitField0_ & ~0x00000002);
row_ = 0;
onChanged();
return this;
}
// optional string seatType = 3;
private java.lang.Object seatType_ = "";
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public boolean hasSeatType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public java.lang.String getSeatType() {
java.lang.Object ref = seatType_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((akka.protobuf.ByteString) ref)
.toStringUtf8();
seatType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public akka.protobuf.ByteString
getSeatTypeBytes() {
java.lang.Object ref = seatType_;
if (ref instanceof String) {
akka.protobuf.ByteString b =
akka.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
seatType_ = b;
return b;
} else {
return (akka.protobuf.ByteString) ref;
}
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public Builder setSeatType(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
seatType_ = value;
onChanged();
return this;
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public Builder clearSeatType() {
bitField0_ = (bitField0_ & ~0x00000004);
seatType_ = getDefaultInstance().getSeatType();
onChanged();
return this;
}
/**
* <code>optional string seatType = 3;</code>
*
* <pre>
* the new field
* </pre>
*/
public Builder setSeatTypeBytes(
akka.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
seatType_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:docs.persistence.SeatReserved)
}
static {
defaultInstance = new SeatReserved(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:docs.persistence.SeatReserved)
}
private static akka.protobuf.Descriptors.Descriptor
internal_static_docs_persistence_SeatReserved_descriptor;
private static
akka.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_docs_persistence_SeatReserved_fieldAccessorTable;
public static akka.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static akka.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\025FlightAppModels.proto\022\020docs.persistenc" +
"e\"=\n\014SeatReserved\022\016\n\006letter\030\001 \002(\t\022\013\n\003row" +
"\030\002 \002(\r\022\020\n\010seatType\030\003 \001(\tB\032\n\026docs.persist" +
"ence.protoH\001"
};
akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public akka.protobuf.ExtensionRegistry assignDescriptors(
akka.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_docs_persistence_SeatReserved_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_docs_persistence_SeatReserved_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_docs_persistence_SeatReserved_descriptor,
new java.lang.String[] { "Letter", "Row", "SeatType", });
return null;
}
};
akka.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new akka.protobuf.Descriptors.FileDescriptor[] {
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

View file

@ -16,6 +16,7 @@
* [stream-integrations](stream-integrations.md)
* [stream-error](stream-error.md)
* [stream-io](stream-io.md)
* [stream-refs](stream-refs.md)
* [stream-parallelism](stream-parallelism.md)
* [stream-testkit](stream-testkit.md)
* [stages-overview](stages-overview.md)

View file

@ -0,0 +1,166 @@
# Akka Streams over network boundaries
Stream references, or "stream refs" for short, allow running Akka Streams across multiple nodes within
an Akka Cluster.
Unlike heavier "streaming data processing" frameworks, Akka Streams are not "deployed" nor automatically distributed.
Akka stream refs are, as the name implies, references to existing parts of a stream, and can be used to create a
distributed processing framework or introduce such capabilities in specific parts of your application, however they
are not on that level of abstraction by themselves.
Stream refs are trivial to make use of in existing clustered Akka applications, and require no additional configuration
or setup. They automatically maintain flow-control / back-pressure over the network, and employ Akka's failure detection
mechanisms to fail-fast ("let it crash!") in the case of failures of remote nodes. They can be seen as an implementation
of the [Work Pulling Pattern](http://www.michaelpollmeier.com/akka-work-pulling-pattern), which one would otherwise
implement manually.
@@@ note
A useful way to think about stream refs is:
"like an `ActorRef`, but for Akka Streams's `Source` and `Sink`".
Since they refer to an already existing, possibly remote, `Sink` or `Source`.
This is not to be mistaken with deploying streams remotely, which this feature is not intended for.
@@@
@@@ note
Since Akka Streams are an implementation of Reactive Streams, by induction,
one can also say that stream refs allow running *Reactive Streams over the network*.
@@@
## Stream References
The prime use case for stream refs is to replace raw actor or HTTP messaging between systems where a long
running stream of data is expected between two entities. Often times, they can be used to effectively achieve point
to point streaming without the need of setting up additional message brokers or similar secondary clusters.
Stream refs are well suited for any system in which you need to send messages between nodes and need to do so in a
flow-controlled fashion. Typical examples include sending work requests to worker nodes, as fast as possible, but
not faster than the worker node can process them, or sending data elements which the downstream may be slow at processing.
It is recommended to mix and introduce stream refs in Actor messaging based systems, where the actor messaging is used to
orchestrate and prepare such message flows, and later the stream refs are used to do the flow-controlled message transfer.
Stream refs are not persistent, however it is simple to build a resume-able stream by introducing such protocol
on the actor messaging layer. Stream refs are absolutely expected to be sent over Akka remoting to other nodes
within a cluster, and as such, complement and do not compete with plain Actor messaging.
Actors would usually be used to establish the stream, by means of some initial message saying
"I want to offer you many log elements (the stream ref)", or alternatively in the opposite way "If you need
to send me much data, here is the stream ref you can use to do so".
Since the two sides ("local" and "remote") of reach reference may be confusing to simply refer to as
"remote" and "local" -- since either side can be seen as "local" or "remote" depending how we look at it --
we propose to use the terminology "origin" and "target", which is defined by where the stream ref was created.
For `SourceRef`s, the "origin" is the side which has the data that it is going to stream out. For `SinkRef`s
the "origin" side is the actor system that is ready to receive the data and has allocated the ref. Those
two may be seen as duals of each other, however to explain patterns about sharing references, we found this
wording to be rather useful.
### Source Refs - offering streaming data over network
A @scala[@scaladoc[`SourceRef`](akka.stream.SourceRef)]@java[@javadoc[`SourceRef`](akka.stream.SourceRef)]
can be offered to a remote actor system in order for it to consume some source of data that we have prepared
locally.
In order to share a `Source` with a remote endpoint you need to materialize it by running it into the `Sink.sourceRef`.
That sink materializes the `SourceRef` that you can then send to other nodes. Please note that it materializes into a
`Future`, so you will have to use the `pipeTo` pattern to send the resolved `SourceRef` to the remote actor.
Scala
: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-source }
The origin actor which creates and owns the Source could also perform some validation or additional setup
when preparing the source. Once it has handed out the `SourceRef` the remote side can run it like this:
Scala
: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-source-use }
The process of preparing and running a `SourceRef` powered distributed stream is shown by the animation below:
![SourceRef usage animation](../images/source-ref-animation.gif)
@@@ warning
A `SourceRef` is *by design* "single-shot", i.e. it may only be materialized once.
This is in order to not complicate the mental model what materializing such value would mean.
By being single-shot, we always know what it means, and on top of those semantics fan-out can still be offered
by emitting multiple `SourceRef`s, each targeting the same `Source` through a `Broadcast` stage.
This also allows for fine grained control how many streams a system can expect to be running
at the same time, which is useful for capacity planning and "allowed number of concurrent streams
limiting" of clients.
@@@
### Sink Refs - offering to receive streaming data
The dual of source references are @scala[@scaladoc[`SinkRef`](akka.stream.SinkRef)]@java[@javadoc[`SinkRef`](akka.stream.SinkRef)]s. They can be used to offer the other side the capability to
send to the *origin* side data in a streaming, flow-controlled fashion. The origin here allocates a Sink,
which could be as simple as a `Sink.foreach` or as advanced as a complex sink which streams the incoming data
into various other systems (e.g. any of the Alpakka provided Sinks).
@@@ note
To form a good mental model of `SinkRef`s, you can think of them as being similar to "passive mode" in FTP.
@@@
Scala
: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-sink }
Using the offered `SinkRef` to send data to the origin of the Sink is also simple, as we can treat the
SinkRef just as any other Sink and directly `runWith` or `run` with it.
Scala
: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #offer-sink-use }
![simple-graph-example.png](../images/sink-ref-dance.png)
@@@ warning
A `SinkRef` is *by design* "single-shot", i.e. it may only be materialized once.
This is in order to not complicate the mental model what materializing such value would mean.
If you have an use case for building a fan-in operation accepting writes from multiple remote nodes,
you can build your Sink and prepend it with a `Merge` stage, each time materializing a new `SinkRef`
targeting that Merge. This has the added benefit of giving you full control how to merge these streams
(i.e. by using "merge preferred" or any other variation of the fan-in stages).
@@@
## Bulk Stream References
@@@ warning
Not yet implemented. See ticket ...... FIXME, ticket number
@@@
Bulk stream references can be used to create simple to use side-channels to transfer humongous amounts
of data such as huge log files, messages or even media, with as much ease as if it was a trivial local stream.
Connections for each bulk stream ref are established independently, and do not utilise
actor messaging (which is not designed for such bulk transfers, but rather small messages).
## Configuration
### Stream reference subscription timeouts
All stream references have a subscription timeout, which is intended to prevent resource leaks
in situations in which a remote node would request the allocation of many streams yet never actually run
them. In order to prevent this, each stream reference has a default timeout (of 30 seconds), after which
if its "handed out" side has not been materialized, the origin will terminate with a timeout exception,
and IF the remote side eventually would be run afterwards, it would also immediately fail with an exception
pointing out that the origin seems to be missing.
Since these timeouts are often very different based on the kind of stream offered, and there can be
many different kinds of them in the same application, it is possible to not only configure this setting
globally (`akka.stream.materializer.stream-ref.subscription-timeout`), but also via attributes:
Scala
: @@snip [FlowStreamRefsDocSpec.scala]($code$/scala/docs/stream/FlowStreamRefsDocSpec.scala) { #attr-sub-timeout }
## General configuration
Other settings can be set globally, in your `application.conf`, by overriding any of the following values
in the `akka.stream.materializer.stream-ref.*` keyspace:
@@snip [reference.conf]($akka$/akka-stream/src/main/resources/reference.conf) { #stream-ref }

View file

@ -0,0 +1,125 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package docs.stream
import akka.NotUsed
import akka.actor.{ Actor, Props }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import akka.testkit.AkkaSpec
import docs.CompileOnlySpec
// Documentation spec for SourceRef / SinkRef: the bodies between the `//#` markers are
// published verbatim as doc snippets, so comments are only added outside those regions.
// All tests run under `compileOnlySpec`, i.e. they are compiled but not executed.
class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec {
"offer a source ref" in compileOnlySpec {
//#offer-source
import akka.stream.SourceRef
case class RequestLogs(streamId: Int)
case class LogsOffer(streamId: Int, sourceRef: SourceRef[String])
class DataSource extends Actor {
implicit val mat = ActorMaterializer()(context)
def receive = {
case RequestLogs(streamId)
// obtain the source you want to offer:
val source: Source[String, NotUsed] = streamLogs(streamId)
// materialize the SourceRef:
val ref: SourceRef[String] = source.runWith(Sink.sourceRef())
// wrap the SourceRef in some domain message, such that the sender knows what source it is
val reply: LogsOffer = LogsOffer(streamId, ref)
// reply to sender
sender() ! reply
}
def streamLogs(streamId: Long): Source[String, NotUsed] = ???
}
//#offer-source
// materializer for the "consuming" side of the example (not part of the snippet)
implicit val mat = ActorMaterializer()
//#offer-source-use
val sourceActor = system.actorOf(Props[DataSource], "dataSource")
sourceActor ! RequestLogs(1337)
val offer = expectMsgType[LogsOffer]
// implicitly converted to a Source:
offer.sourceRef.runWith(Sink.foreach(println))
// alternatively explicitly obtain Source from SourceRef:
// offer.sourceRef.source.runWith(Sink.foreach(println))
//#offer-source-use
}
"offer a sink ref" in compileOnlySpec {
//#offer-sink
import akka.pattern._
import akka.stream.SinkRef
case class PrepareUpload(sourceId: String)
case class MeasurementsSinkReady(sourceId: String, sinkRef: SinkRef[String])
class DataReceiver extends Actor {
import context.dispatcher
implicit val mat = ActorMaterializer()(context)
def receive = {
case PrepareUpload(nodeId)
// obtain the source you want to offer:
val sink: Sink[String, NotUsed] = logsSinkFor(nodeId)
// materialize the SinkRef (the remote is like a source of data for us):
val ref: SinkRef[String] = Source.sinkRef[String]().to(sink).run()
// wrap the SinkRef in some domain message, such that the sender knows what source it is
val reply: MeasurementsSinkReady = MeasurementsSinkReady(nodeId, ref)
// reply to sender
sender() ! reply
}
def logsSinkFor(nodeId: String): Sink[String, NotUsed] = ???
}
//#offer-sink
// local helpers for the usage snippet below (not part of the published snippet)
implicit val mat = ActorMaterializer()
def localMetrics(): Source[String, NotUsed] = Source.single("")
//#offer-sink-use
val receiver = system.actorOf(Props[DataReceiver], "receiver")
receiver ! PrepareUpload("system-42-tmp")
val ready = expectMsgType[MeasurementsSinkReady]
// stream local metrics to Sink's origin:
localMetrics().runWith(ready.sinkRef)
//#offer-sink-use
}
"show how to configure timeouts with attrs" in compileOnlySpec {
// null materializer is fine here: this spec is compile-only and never runs the streams
implicit val mat: ActorMaterializer = null
//#attr-sub-timeout
// configure the timeout for source
import scala.concurrent.duration._
import akka.stream.StreamRefAttributes
// configuring SourceRef.sink (notice that we apply the attributes to the Sink!):
Source.repeat("hello")
.runWith(Sink.sourceRef().addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds)))
// configuring SinkRef.source:
Source.sinkRef().addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds))
.runWith(Sink.ignore) // not very interesting Sink, just an example
//#attr-sub-timeout
}
}

View file

@ -87,7 +87,8 @@ private[remote] final class SendQueue[T] extends GraphStageWithMaterializedValue
needWakeup = true
// additional poll() to grab any elements that might missed the needWakeup
// and have been enqueued just after it
if (firstAttempt) tryPush(firstAttempt = false)
if (firstAttempt)
tryPush(firstAttempt = false)
case elem
needWakeup = false // there will be another onPull
push(out, elem)

View file

@ -1,274 +0,0 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote
import akka.NotUsed
import akka.actor.{ Actor, ActorIdentity, ActorLogging, ActorRef, ActorSystem, ActorSystemImpl, Identify, Props }
import akka.stream.ActorMaterializer
import akka.stream.remote.scaladsl.{ SinkRef, SourceRef }
import akka.stream.scaladsl.{ Sink, Source }
import akka.testkit.{ AkkaSpec, ImplicitSender, SocketUtil, TestKit, TestProbe }
import akka.util.ByteString
import com.typesafe.config._
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
// Test fixtures (pre-move version, under akka.stream.remote): a remote actor that hands
// out SourceRef/SinkRef instances on request, plus message envelopes and remoting config.
// NOTE(review): this version still exposes Future-wrapped refs as materialized values.
object StreamRefsSpec {
object DatasourceActor {
// Props factory; runs the actor on the stream test dispatcher.
def props(probe: ActorRef): Props =
Props(new DatasourceActor(probe))
.withDispatcher("akka.test.stream-dispatcher")
}
// Replies to string commands by materializing a stream ref and sending it back to sender().
// Elements received through a handed-out SinkRef are forwarded to `probe`.
class DatasourceActor(probe: ActorRef) extends Actor with ActorLogging {
implicit val mat = ActorMaterializer()
def receive = {
case "give"
/*
* Here we're able to send a source to a remote recipient
*
* For them it's a Source; for us it is a Sink we run data "into"
*/
val source: Source[String, NotUsed] = Source(List("hello", "world"))
val ref: Future[SourceRef[String]] = source.runWith(SourceRef.sink())
println(s"source = ${source}")
println(s"ref = ${Await.result(ref, 10.seconds)}")
// blocking Await is acceptable in this test fixture; see FIXME below about the Future API
sender() ! Await.result(ref, 10.seconds)
// case "send-bulk"
// /*
// * Here we're able to send a source to a remote recipient
// * The source is a "bulk transfer one, in which we're ready to send a lot of data"
// *
// * For them it's a Source; for us it is a Sink we run data "into"
// */
// val source: Source[ByteString, NotUsed] = Source.single(ByteString("huge-file-"))
// val ref: SourceRef[ByteString] = source.runWith(SourceRef.bulkTransfer())
// sender() ! BulkSourceMsg(ref)
case "receive"
/*
* We write out code, knowing that the other side will stream the data into it.
*
* For them it's a Sink; for us it's a Source.
*/
val sink: Future[SinkRef[String]] =
SinkRef.source[String]
.to(Sink.actorRef(probe, "<COMPLETE>"))
.run()
// FIXME we want to avoid forcing people to do the Future here
sender() ! Await.result(sink, 10.seconds)
// case "receive-bulk"
// /*
// * We write out code, knowing that the other side will stream the data into it.
// * This will open a dedicated connection per transfer.
// *
// * For them it's a Sink; for us it's a Source.
// */
// val sink: SinkRef[ByteString] =
// SinkRef.bulkTransferSource()
// .to(Sink.actorRef(probe, "<COMPLETE>"))
// .run()
//
//
// sender() ! BulkSinkMsg(sink)
}
}
// -------------------------
// Domain message envelopes carrying stream refs across the remoting boundary.
final case class SourceMsg(dataSource: SourceRef[String])
final case class BulkSourceMsg(dataSource: SourceRef[ByteString])
final case class SinkMsg(dataSink: SinkRef[String])
final case class BulkSinkMsg(dataSink: SinkRef[ByteString])
// Remoting config bound to a fresh ephemeral port so two systems can run side by side.
def config(): Config = {
val address = SocketUtil.temporaryServerAddress()
ConfigFactory.parseString(
s"""
akka {
loglevel = INFO
actor {
provider = remote
serialize-messages = off
// serializers {
// akka-stream-ref-test = "akka.stream.remote.StreamRefsSpecSerializer"
// }
//
// serialization-bindings {
// "akka.stream.remote.StreamRefsSpec$$SourceMsg" = akka-stream-ref-test
// "akka.stream.remote.StreamRefsSpec$$BulkSourceMsg" = akka-stream-ref-test
// "akka.stream.remote.StreamRefsSpec$$SinkMsg" = akka-stream-ref-test
// "akka.stream.remote.StreamRefsSpec$$BulkSinkMsg" = akka-stream-ref-test
// }
//
// serialization-identifiers {
// "akka.stream.remote.StreamRefsSpecSerializer" = 33
// }
}
remote.netty.tcp {
port = ${address.getPort}
hostname = "${address.getHostName}"
}
}
""").withFallback(ConfigFactory.load())
}
}
// Pre-move spec: exercises SourceRef/SinkRef over real remoting between `system`
// (the local AkkaSpec system) and a second `remoteSystem` hosting DatasourceActor.
class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSender {
import StreamRefsSpec._
def this() {
this(StreamRefsSpec.config())
}
// second actor system, so refs actually cross a (local) network boundary
val remoteSystem = ActorSystem("RemoteSystem", StreamRefsSpec.config())
implicit val mat = ActorMaterializer()
override protected def beforeTermination(): Unit =
TestKit.shutdownActorSystem(remoteSystem)
// probe receiving the elements forwarded by DatasourceActor's sinks
val p = TestProbe()
// obtain the remoteActor ref via selection in order to use _real_ remoting in this test
val remoteActor = {
val it = remoteSystem.actorOf(DatasourceActor.props(p.ref), "remoteActor")
val remoteAddress = remoteSystem.asInstanceOf[ActorSystemImpl].provider.getDefaultAddress
system.actorSelection(it.path.toStringWithAddress(remoteAddress)) ! Identify("hi")
expectMsgType[ActorIdentity].ref.get
}
"A SourceRef" must {
"send messages via remoting" in {
remoteActor ! "give"
val sourceRef = expectMsgType[SourceRef[String]]
Source.fromGraph(sourceRef)
.log("RECEIVED")
.runWith(Sink.actorRef(p.ref, "<COMPLETE>"))
p.expectMsg("hello")
p.expectMsg("world")
p.expectMsg("<COMPLETE>")
}
}
"A SinkRef" must {
"receive elements via remoting" in {
remoteActor ! "receive"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
Source("hello" :: "world" :: Nil)
.to(remoteSink)
.run()
p.expectMsg("hello")
p.expectMsg("world")
p.expectMsg("<COMPLETE>")
}
"fail origin if remote Sink gets a failure" in {
remoteActor ! "receive"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
val remoteFailureMessage = "Booom!"
Source.failed(new Exception(remoteFailureMessage))
.to(remoteSink)
.run()
// the failure must propagate across remoting back to the origin Sink
val f = p.expectMsgType[akka.actor.Status.Failure]
f.cause.getMessage should ===(s"Remote Sink failed, reason: $remoteFailureMessage")
}
"receive hundreds of elements via remoting" in {
remoteActor ! "receive"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
val msgs = (1 to 100).toList.map(i s"payload-$i")
Source(msgs)
.to(remoteSink)
.run()
// elements must arrive in order, followed by the completion marker
msgs.foreach(t p.expectMsg(t))
p.expectMsg("<COMPLETE>")
}
// "fail origin if remote Sink is stopped abruptly" in {
// val otherSystem = ActorSystem("OtherRemoteSystem", StreamRefsSpec.config())
//
// try {
// // obtain the remoteActor ref via selection in order to use _real_ remoting in this test
// val remoteActor = {
// val it = otherSystem.actorOf(DatasourceActor.props(p.ref), "remoteActor")
// val remoteAddress = otherSystem.asInstanceOf[ActorSystemImpl].provider.getDefaultAddress
// system.actorSelection(it.path.toStringWithAddress(remoteAddress)) ! Identify("hi")
// expectMsgType[ActorIdentity].ref.get
// }
//
// remoteActor ! "receive"
// val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
//
// val otherMat = ActorMaterializer()(otherSystem)
// Source.maybe[String] // not emitting anything
// .to(remoteSink)
// .run()(otherMat)
//
// // and the system crashes; which should cause abrupt termination in the stream
// Thread.sleep(300)
// otherMat.shutdown()
//
// val f = p.expectMsgType[akka.actor.Status.Failure]
// f.cause.getMessage should ===(s"Remote Sink failed, reason:")
// } finally TestKit.shutdownActorSystem(otherSystem)
// }
}
}
//
//class StreamRefsSpecSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer {
//
// lazy val ext = SerializationExtension(system)
//
// override def manifest(o: AnyRef): String = o match {
// case StreamRefsSpec.SinkMsg(_) "si"
// case StreamRefsSpec.BulkSinkMsg(_) "bsi"
// case StreamRefsSpec.SourceMsg(_) "so"
// case StreamRefsSpec.BulkSourceMsg(_) "bso"
// }
//
// override def toBinary(o: AnyRef): Array[Byte] = {
// system.log.warning("Serializing: " + o)
// o match {
// case StreamRefsSpec.SinkMsg(s) s.
// case StreamRefsSpec.BulkSinkMsg(s) ext.serialize(s).get
// case StreamRefsSpec.SourceMsg(s) ext.serialize(s).get
// case StreamRefsSpec.BulkSourceMsg(s) ext.serialize(s).get
// }
// }
//
// override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = {
// system.log.warning("MANI: " + manifest)
// ???
// }
//
//}

View file

@ -0,0 +1,366 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.scaladsl
import akka.NotUsed
import akka.actor.Status.Failure
import akka.actor.{ Actor, ActorIdentity, ActorLogging, ActorRef, ActorSystem, ActorSystemImpl, Identify, Props }
import akka.pattern._
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl._
import akka.stream.{ ActorMaterializer, SinkRef, SourceRef, StreamRefAttributes }
import akka.testkit.{ AkkaSpec, ImplicitSender, SocketUtil, TestKit, TestProbe }
import akka.util.ByteString
import com.typesafe.config._
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.util.control.NoStackTrace
// Test fixtures for the akka.stream.scaladsl StreamRefs spec: a remote actor that
// materializes SourceRef/SinkRef instances in response to string commands, plus the
// message envelopes and remoting configuration used by the spec.
object StreamRefsSpec {
object DataSourceActor {
// Props factory; runs the actor on the stream test dispatcher.
def props(probe: ActorRef): Props =
Props(new DataSourceActor(probe))
.withDispatcher("akka.test.stream-dispatcher")
}
// Replies to each command with a freshly materialized SourceRef or SinkRef.
// Elements flowing through handed-out SinkRefs are forwarded to `probe`.
class DataSourceActor(probe: ActorRef) extends Actor with ActorLogging {
implicit val mat = ActorMaterializer()
def receive = {
case "give"
/*
* Here we're able to send a source to a remote recipient
*
* For them it's a Source; for us it is a Sink we run data "into"
*/
val source: Source[String, NotUsed] = Source(List("hello", "world"))
val ref: SourceRef[String] = source.runWith(Sink.sourceRef())
sender() ! ref
case "give-infinite"
// unbounded source, used to verify that demand is respected across remoting
val source: Source[String, NotUsed] = Source.fromIterator(() Iterator.from(1)).map("ping-" + _)
val ref: SourceRef[String] = source.runWith(Sink.sourceRef())
sender() ! ref
case "give-fail"
// immediately failing source, used to verify failure propagation to the remote side
val ref = Source.failed[String](new Exception("Booooom!") with NoStackTrace)
.runWith(Sink.sourceRef())
sender() ! ref
case "give-complete-asap"
// empty source: completes as soon as the remote side subscribes
val ref = Source.empty
.runWith(Sink.sourceRef())
sender() ! ref
case "give-subscribe-timeout"
// short subscription timeout, used to verify the "remote never materialized" path
val ref = Source.repeat("is anyone there?")
.toMat(Sink.sourceRef())(Keep.right) // attributes like this so they apply to the Sink.sourceRef
.withAttributes(StreamRefAttributes.subscriptionTimeout(500.millis))
.run()
sender() ! ref
// case "send-bulk"
// /*
// * Here we're able to send a source to a remote recipient
// * The source is a "bulk transfer one, in which we're ready to send a lot of data"
// *
// * For them it's a Source; for us it is a Sink we run data "into"
// */
// val source: Source[ByteString, NotUsed] = Source.single(ByteString("huge-file-"))
// val ref: SourceRef[ByteString] = source.runWith(SourceRef.bulkTransfer())
// sender() ! BulkSourceMsg(ref)
case "receive"
/*
* We write out code, knowing that the other side will stream the data into it.
*
* For them it's a Sink; for us it's a Source.
*/
val sink: SinkRef[String] =
Source.sinkRef[String]
.to(Sink.actorRef(probe, "<COMPLETE>"))
.run()
sender() ! sink
case "receive-subscribe-timeout"
// short subscription timeout on the SinkRef side
val sink = Source.sinkRef[String]
.withAttributes(StreamRefAttributes.subscriptionTimeout(500.millis))
.to(Sink.actorRef(probe, "<COMPLETE>"))
.run()
sender() ! sink
case "receive-32"
// drives demand manually via a TestSink probe to verify back-pressure semantics;
// the driver runs on a Future and reports "<COMPLETED>" to the probe when done
val (sink, driver) = Source.sinkRef[String]
.toMat(TestSink.probe(context.system))(Keep.both)
.run()
import context.dispatcher
Future {
driver.ensureSubscription()
driver.request(2)
driver.expectNext()
driver.expectNext()
driver.expectNoMessage(100.millis)
driver.request(30)
driver.expectNextN(30)
"<COMPLETED>"
} pipeTo probe
sender() ! sink
// case "receive-bulk"
// /*
// * We write out code, knowing that the other side will stream the data into it.
// * This will open a dedicated connection per transfer.
// *
// * For them it's a Sink; for us it's a Source.
// */
// val sink: SinkRef[ByteString] =
// SinkRef.bulkTransferSource()
// .to(Sink.actorRef(probe, "<COMPLETE>"))
// .run()
//
//
// sender() ! BulkSinkMsg(sink)
}
}
// -------------------------
// Domain message envelopes carrying stream refs across the remoting boundary.
final case class SourceMsg(dataSource: SourceRef[String])
final case class BulkSourceMsg(dataSource: SourceRef[ByteString])
final case class SinkMsg(dataSink: SinkRef[String])
final case class BulkSinkMsg(dataSink: SinkRef[ByteString])
// Remoting config bound to a fresh ephemeral port so two systems can run side by side.
def config(): Config = {
val address = SocketUtil.temporaryServerAddress()
ConfigFactory.parseString(
s"""
akka {
loglevel = INFO
actor {
provider = remote
serialize-messages = off
}
remote.netty.tcp {
port = ${address.getPort}
hostname = "${address.getHostName}"
}
}
""").withFallback(ConfigFactory.load())
}
}
// Spec exercising SourceRef/SinkRef over real remoting between `system` (the local
// AkkaSpec system) and a second `remoteSystem` hosting DataSourceActor: happy path,
// failure propagation, back-pressure, subscription timeouts, and single-shot semantics.
class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSender {
import StreamRefsSpec._
def this() {
this(StreamRefsSpec.config())
}
// second actor system, so refs actually cross a (local) network boundary
val remoteSystem = ActorSystem("RemoteSystem", StreamRefsSpec.config())
implicit val mat = ActorMaterializer()
override protected def beforeTermination(): Unit =
TestKit.shutdownActorSystem(remoteSystem)
// probe receiving the elements forwarded by DataSourceActor's sinks
val p = TestProbe()
// obtain the remoteActor ref via selection in order to use _real_ remoting in this test
val remoteActor = {
val it = remoteSystem.actorOf(DataSourceActor.props(p.ref), "remoteActor")
val remoteAddress = remoteSystem.asInstanceOf[ActorSystemImpl].provider.getDefaultAddress
system.actorSelection(it.path.toStringWithAddress(remoteAddress)) ! Identify("hi")
expectMsgType[ActorIdentity].ref.get
}
"A SourceRef" must {
"send messages via remoting" in {
remoteActor ! "give"
val sourceRef = expectMsgType[SourceRef[String]]
sourceRef
.runWith(Sink.actorRef(p.ref, "<COMPLETE>"))
p.expectMsg("hello")
p.expectMsg("world")
p.expectMsg("<COMPLETE>")
}
"fail when remote source failed" in {
remoteActor ! "give-fail"
val sourceRef = expectMsgType[SourceRef[String]]
sourceRef
.runWith(Sink.actorRef(p.ref, "<COMPLETE>"))
val f = p.expectMsgType[Failure]
f.cause.getMessage should include("Remote stream (")
// actor name here, for easier identification
f.cause.getMessage should include("failed, reason: Booooom!")
}
"complete properly when remote source is empty" in {
// this is a special case since it makes sure that the remote stage is still there when we connect to it
remoteActor ! "give-complete-asap"
val sourceRef = expectMsgType[SourceRef[String]]
sourceRef
.runWith(Sink.actorRef(p.ref, "<COMPLETE>"))
p.expectMsg("<COMPLETE>")
}
"respect back-pressure from (implied by target Sink)" in {
remoteActor ! "give-infinite"
val sourceRef = expectMsgType[SourceRef[String]]
val probe = sourceRef
.runWith(TestSink.probe)
// no demand issued yet, so no elements may arrive
probe.ensureSubscription()
probe.expectNoMessage(100.millis)
probe.request(1)
probe.expectNext("ping-1")
probe.expectNoMessage(100.millis)
probe.request(20)
probe.expectNextN((1 to 20).map(i "ping-" + (i + 1)))
probe.cancel()
// since no demand anyway
probe.expectNoMessage(100.millis)
// should not cause more pulling, since we issued a cancel already
probe.request(10)
probe.expectNoMessage(100.millis)
}
"receive timeout if subscribing too late to the source ref" in {
remoteActor ! "give-subscribe-timeout"
val remoteSource: SourceRef[String] = expectMsgType[SourceRef[String]]
// not materializing it, awaiting the timeout...
Thread.sleep(800) // the timeout is 500ms
val probe = remoteSource
.runWith(TestSink.probe[String](system))
// val failure = p.expectMsgType[Failure]
// failure.cause.getMessage should include("[SourceRef-0] Remote side did not subscribe (materialize) handed out Sink reference")
// the local "remote sink" should cancel, since it should notice the origin target actor is dead
probe.ensureSubscription()
val ex = probe.expectError()
ex.getMessage should include("has terminated! Tearing down this side of the stream as well.")
}
}
"A SinkRef" must {
"receive elements via remoting" in {
remoteActor ! "receive"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
Source("hello" :: "world" :: Nil)
.to(remoteSink)
.run()
p.expectMsg("hello")
p.expectMsg("world")
p.expectMsg("<COMPLETE>")
}
"fail origin if remote Sink gets a failure" in {
remoteActor ! "receive"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
val remoteFailureMessage = "Booom!"
Source.failed(new Exception(remoteFailureMessage))
.to(remoteSink)
.run()
val f = p.expectMsgType[akka.actor.Status.Failure]
f.cause.getMessage should include(s"Remote stream (")
// actor name here, for easier identification
f.cause.getMessage should include(s"failed, reason: $remoteFailureMessage")
}
"receive hundreds of elements via remoting" in {
remoteActor ! "receive"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
val msgs = (1 to 100).toList.map(i s"payload-$i")
Source(msgs)
.runWith(remoteSink)
// elements must arrive in order, followed by the completion marker
msgs.foreach(t p.expectMsg(t))
p.expectMsg("<COMPLETE>")
}
"receive timeout if subscribing too late to the sink ref" in {
remoteActor ! "receive-subscribe-timeout"
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
// not materializing it, awaiting the timeout...
Thread.sleep(800) // the timeout is 500ms
val probe = TestSource.probe[String](system)
.to(remoteSink)
.run()
val failure = p.expectMsgType[Failure]
failure.cause.getMessage should include("Remote side did not subscribe (materialize) handed out Sink reference")
// the local "remote sink" should cancel, since it should notice the origin target actor is dead
probe.expectCancellation()
}
"respect back -pressure from (implied by origin Sink)" in {
remoteActor ! "receive-32"
val sinkRef = expectMsgType[SinkRef[String]]
Source.repeat("hello") runWith sinkRef
// if we get this message, it means no checks in the request/expect semantics were broken, good!
p.expectMsg("<COMPLETED>")
}
"not allow materializing multiple times" in {
remoteActor ! "receive"
val sinkRef = expectMsgType[SinkRef[String]]
// stream refs are single-shot: the first materialization wins, the second must be cancelled
val p1: TestPublisher.Probe[String] = TestSource.probe[String].to(sinkRef).run()
val p2: TestPublisher.Probe[String] = TestSource.probe[String].to(sinkRef).run()
p1.ensureSubscription()
val req = p1.expectRequest()
// will be cancelled immediately, since it's 2nd:
p2.ensureSubscription()
p2.expectCancellation()
}
}
}

View file

@ -0,0 +1,3 @@
# #24230 stream refs - SourceRef / SinkRef
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.ActorMaterializerSettings.this")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.stage.GraphStageLogic#StageActor.this")

View file

@ -2,42 +2,36 @@
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
option java_package = "akka.stream.remote";
option java_package = "akka.stream";
option optimize_for = SPEED;
/*************************************************
StreamRefs (SourceRef / SinkRef) related formats
**************************************************/
// TODO make explicit types for SinkRef/SourceRef as they can carry initial settings (initial demand etc)
message SinkRef {
required ActorRef targetRef = 1;
optional int64 initialDemand = 2;
}
message SourceRef {
required ActorRef originRef = 1;
// FIXME: it's basically SinkRef since we just ommit the optional initial demand...
// FIXME: could it be that all those passable refs should be expressed internally as a StreamRef?
}
message ActorRef {
required string path = 1;
}
message Option {
optional Payload value = 1;
}
message Payload {
required bytes enclosedMessage = 1;
required int32 serializerId = 2;
optional bytes messageManifest = 4;
optional bytes messageManifest = 3;
}
// stream refs protocol
message OnSubscribeHandshake {
required ActorRef targetRef = 1;
}
message CumulativeDemand {
required int64 seqNr = 1;
}
@ -47,10 +41,10 @@ message SequencedOnNext {
required Payload payload = 2;
}
message RemoteSinkFailure {
message RemoteStreamFailure {
optional bytes cause = 1;
}
message RemoteSinkCompleted {
message RemoteStreamCompleted {
required int64 seqNr = 1;
}

View file

@ -82,6 +82,33 @@ akka {
# to disable the usage of the buffer.
write-buffer-size = 16 KiB
}
//#stream-ref
# configure defaults for SourceRef and SinkRef
stream-ref {
# Buffer of a SinkRef that is used to batch Request elements from the other side of the stream ref
#
# The buffer will be attempted to be filled eagerly even while the local stage did not request elements,
# because the delay of requesting over network boundaries is much higher.
buffer-capacity = 32
# Demand is signalled by sending a cumulative demand message ("requesting messages until the n-th sequence number")
# Using a cumulative demand model allows us to re-deliver the demand message in case of message loss (which should
# be very rare in any case, yet possible -- mostly under connection break-down and re-establishment).
#
# The semantics of handling and updating the demand however are in-line with what Reactive Streams dictates.
#
# In normal operation, demand is signalled in response to arriving elements, however if no new elements arrive
# within `demand-redelivery-interval` a re-delivery of the demand will be triggered, assuming that it may have gotten lost.
demand-redelivery-interval = 1 second
# Subscription timeout, during which the "remote side" MUST subscribe (materialize) the handed out stream ref.
# This timeout does not have to be very low in normal situations, since the remote side may also need to
# prepare things before it is ready to materialize the reference. However the timeout is needed to avoid leaking
# inactive streams which are never subscribed to.
subscription-timeout = 30 seconds
}
//#stream-ref
}
# Fully qualified config path which holds the dispatcher configuration
@ -99,33 +126,6 @@ akka {
}
}
# configure defaults for SourceRef and SinkRef
stream-refs {
# Default initial demand to be sent along with a SinkRef.
#
# The receiving side will be prepared to buffer as much as that many elements,
# and the sending side can assume to safely send as many elements without receiving
# an demand message before.
initial-demand = 4
# Demand is signalled by sending a cumulative demand message ("requesting messages until the n-th sequence number)
# Using a cumulative demand model allows us to re-deliver the demand message in case of message loss (which should
# be very rare in any case, yet possible -- mostly under connection break-down and re-establishment).
#
# The semantics of handling and updating the demand however are in-line with what Reactive Streams dictates.
#
# In normal operation, demand is signalled in response to arriving elements, however if no new elements arrive
# within `demand-redelivery-interval` a re-delivery of the demand will be triggered, assuming that it may have gotten lost.
demand-redelivery-interval = 500 ms
# Idle timeout, after which both sides of the stream-ref will terminate.
# Notice that demand-redelivery works as a keep-alive, and if a remote Sink keeps receiving the demand re-deliveries,
# it knows the other side is still alive and will not terminate. The other-side though will eventually decide, by its
# idle-timeout that "did not get any signal from the remote, over idle-timeout, so will terminate" and that termination
# would be signalled back to the remote Sink.
idle-timeout = 5 seconds
}
}
# configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http i.e. when serving https connections)
@ -136,17 +136,17 @@ akka {
actor {
serializers {
akka-stream-refs = "akka.stream.remote.serialization.StreamRefSerializer"
akka-stream-ref = "akka.stream.serialization.StreamRefSerializer"
}
serialization-bindings {
"akka.stream.remote.scaladsl.SinkRef" = akka-stream-refs
"akka.stream.remote.scaladsl.SourceRef" = akka-stream-refs
"akka.stream.remote.StreamRefs$Protocol" = akka-stream-refs
"akka.stream.SinkRef" = akka-stream-ref
"akka.stream.SourceRef" = akka-stream-ref
"akka.stream.impl.streamref.StreamRefsProtocol" = akka-stream-ref
}
serialization-identifiers {
"akka.stream.remote.serialization.StreamRefSerializer" = 30
"akka.stream.serialization.StreamRefSerializer" = 30
}
}
}

View file

@ -11,10 +11,11 @@ import akka.event.LoggingAdapter
import akka.util.Helpers.toRootLowerCase
import akka.stream.ActorMaterializerSettings.defaultMaxFixedBufferSize
import akka.stream.impl._
import com.typesafe.config.Config
import com.typesafe.config.{ Config, ConfigFactory }
import scala.concurrent.duration._
import akka.japi.function
import akka.stream.impl.streamref.StreamRefSettingsImpl
import akka.stream.stage.GraphStageLogic
import scala.util.control.NoStackTrace
@ -233,6 +234,8 @@ object ActorMaterializerSettings {
/**
* Create [[ActorMaterializerSettings]] from individual settings (Scala).
*/
@Deprecated
@deprecated("Create the settings using the apply(system) or apply(config) method, and then modify them using the .with methods.", since = "2.5.10")
def apply(
initialInputBufferSize: Int,
maxInputBufferSize: Int,
@ -246,7 +249,7 @@ object ActorMaterializerSettings {
maxFixedBufferSize: Int) =
new ActorMaterializerSettings(
initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize)
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, defaultMaxFixedBufferSize, defaultIoSettings, defaultStreamRefSettings)
/**
* Create [[ActorMaterializerSettings]] from the settings of an [[akka.actor.ActorSystem]] (Scala).
@ -270,11 +273,14 @@ object ActorMaterializerSettings {
autoFusing = config.getBoolean("auto-fusing"),
maxFixedBufferSize = config.getInt("max-fixed-buffer-size"),
syncProcessingLimit = config.getInt("sync-processing-limit"),
ioSettings = IOSettings(config.getConfig("io")))
ioSettings = IOSettings(config.getConfig("io")),
streamRefSettings = StreamRefSettings(config.getConfig("stream-ref")))
/**
* Create [[ActorMaterializerSettings]] from individual settings (Java).
*/
@Deprecated
@deprecated("Create the settings using the create(system) or create(config) method, and then modify them using the .with methods.", since = "2.5.10")
def create(
initialInputBufferSize: Int,
maxInputBufferSize: Int,
@ -288,7 +294,7 @@ object ActorMaterializerSettings {
maxFixedBufferSize: Int) =
new ActorMaterializerSettings(
initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize)
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, defaultMaxFixedBufferSize, defaultIoSettings, defaultStreamRefSettings)
/**
* Create [[ActorMaterializerSettings]] from the settings of an [[akka.actor.ActorSystem]] (Java).
@ -303,6 +309,9 @@ object ActorMaterializerSettings {
apply(config)
private val defaultMaxFixedBufferSize = 1000
private val defaultIoSettings = IOSettings(tcpWriteBufferSize = 16 * 1024)
// sadly due to the existence of the create-from-individual-parts methods, we need to replicate the defaults here from reference.conf...
private val defaultStreamRefSettings: StreamRefSettings = StreamRefSettings(ConfigFactory.load().getConfig("akka.stream.materializer.stream-ref"))
}
/**
@ -327,38 +336,8 @@ final class ActorMaterializerSettings private (
val autoFusing: Boolean,
val maxFixedBufferSize: Int,
val syncProcessingLimit: Int,
val ioSettings: IOSettings) {
// backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima
def this(
initialInputBufferSize: Int,
maxInputBufferSize: Int,
dispatcher: String,
supervisionDecider: Supervision.Decider,
subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
debugLogging: Boolean,
outputBurstLimit: Int,
fuzzingMode: Boolean,
autoFusing: Boolean,
maxFixedBufferSize: Int,
syncProcessingLimit: Int) =
this(initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit,
IOSettings(tcpWriteBufferSize = 16 * 1024))
def this(
initialInputBufferSize: Int,
maxInputBufferSize: Int,
dispatcher: String,
supervisionDecider: Supervision.Decider,
subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
debugLogging: Boolean,
outputBurstLimit: Int,
fuzzingMode: Boolean,
autoFusing: Boolean,
maxFixedBufferSize: Int) =
this(initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, defaultMaxFixedBufferSize)
val ioSettings: IOSettings,
val streamRefSettings: StreamRefSettings) {
require(initialInputBufferSize > 0, "initialInputBufferSize must be > 0")
require(syncProcessingLimit > 0, "syncProcessingLimit must be > 0")
@ -378,10 +357,11 @@ final class ActorMaterializerSettings private (
autoFusing: Boolean = this.autoFusing,
maxFixedBufferSize: Int = this.maxFixedBufferSize,
syncProcessingLimit: Int = this.syncProcessingLimit,
ioSettings: IOSettings = this.ioSettings) = {
ioSettings: IOSettings = this.ioSettings,
streamRefSettings: StreamRefSettings = this.streamRefSettings) = {
new ActorMaterializerSettings(
initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit, ioSettings)
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit, ioSettings, streamRefSettings)
}
/**
@ -500,6 +480,11 @@ final class ActorMaterializerSettings private (
if (ioSettings == this.ioSettings) this
else copy(ioSettings = ioSettings)
/** Change settings specific to [[SourceRef]] and [[SinkRef]]. */
def withStreamRefSettings(streamRefSettings: StreamRefSettings): ActorMaterializerSettings =
if (streamRefSettings == this.streamRefSettings) this
else copy(streamRefSettings = streamRefSettings)
private def requirePowerOfTwo(n: Integer, name: String): Unit = {
require(n > 0, s"$name must be > 0")
require((n & (n - 1)) == 0, s"$name must be a power of two")

View file

@ -11,6 +11,7 @@ import scala.annotation.tailrec
import scala.reflect.{ ClassTag, classTag }
import akka.japi.function
import java.net.URLEncoder
import java.util.concurrent.TimeUnit
import akka.annotation.InternalApi
import akka.stream.impl.TraversalBuilder
@ -18,6 +19,8 @@ import akka.stream.impl.TraversalBuilder
import scala.compat.java8.OptionConverters._
import akka.util.{ ByteString, OptionVal }
import scala.concurrent.duration.FiniteDuration
/**
* Holds attributes which can be used to alter [[akka.stream.scaladsl.Flow]] / [[akka.stream.javadsl.Flow]]
* or [[akka.stream.scaladsl.GraphDSL]] / [[akka.stream.javadsl.GraphDSL]] materialization.
@ -414,3 +417,22 @@ object ActorAttributes {
Attributes(LogLevels(onElement, onFinish, onFailure))
}
/**
 * Attributes for stream refs ([[akka.stream.SourceRef]] and [[akka.stream.SinkRef]]).
 * Note that more attributes are defined in [[Attributes]] and [[ActorAttributes]].
 */
object StreamRefAttributes {
  import Attributes._

  /** Attributes specific to stream refs. */
  sealed trait StreamRefAttribute extends Attribute

  // When present on a stage, this value takes precedence over the materializer-wide
  // `stream-ref.subscription-timeout` setting (the stages look it up with the setting as fallback).
  final case class SubscriptionTimeout(timeout: FiniteDuration) extends StreamRefAttribute

  /**
   * Specifies the subscription timeout within which the remote side MUST subscribe to the handed out stream reference.
   */
  def subscriptionTimeout(timeout: FiniteDuration): Attributes = Attributes(SubscriptionTimeout(timeout))
}

View file

@ -0,0 +1,6 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream
import akka.actor.ActorRef

View file

@ -0,0 +1,52 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.annotation.DoNotInherit
import akka.stream.impl.streamref.StreamRefSettingsImpl
import com.typesafe.config.Config
import scala.concurrent.duration._
object StreamRefSettings {

  /** Java API: Create settings from the `akka.stream.materializer.stream-ref` section of the system's config. */
  def create(system: ActorSystem): StreamRefSettings = apply(system)
  /** Scala API: Create settings from the `akka.stream.materializer.stream-ref` section of the system's config. */
  def apply(system: ActorSystem): StreamRefSettings = {
    apply(system.settings.config.getConfig("akka.stream.materializer.stream-ref"))
  }

  /** Java API: Create settings from a config subsection shaped like `akka.stream.materializer.stream-ref`. */
  def create(c: Config): StreamRefSettings = apply(c)
  /** Scala API: Create settings from a config subsection shaped like `akka.stream.materializer.stream-ref`. */
  def apply(c: Config): StreamRefSettings = {
    StreamRefSettingsImpl(
      bufferCapacity = c.getInt("buffer-capacity"),
      // getDuration normalizes any config duration unit ("200ms", "1s", ...) to milliseconds
      demandRedeliveryInterval = c.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis,
      subscriptionTimeout = c.getDuration("subscription-timeout", TimeUnit.MILLISECONDS).millis
    )
  }
}
/**
 * Settings specific to [[SourceRef]] and [[SinkRef]].
 * More detailed documentation about each of the settings is available in `reference.conf`.
 */
@DoNotInherit
trait StreamRefSettings {
  /** Maximum number of elements buffered on the receiving side of a stream ref. */
  def bufferCapacity: Int
  /** Interval at which accumulated demand is re-delivered to the sending side, to survive message loss. */
  def demandRedeliveryInterval: FiniteDuration
  /** Time within which the remote side must materialize a handed out stream ref, before the origin fails. */
  def subscriptionTimeout: FiniteDuration

  // --- with... methods ---
  def withBufferCapacity(value: Int): StreamRefSettings
  // consistency fix: use the imported FiniteDuration, matching the sibling with... methods,
  // instead of the fully-qualified scala.concurrent.duration.FiniteDuration
  def withDemandRedeliveryInterval(value: FiniteDuration): StreamRefSettings
  def withSubscriptionTimeout(value: FiniteDuration): StreamRefSettings
}

View file

@ -0,0 +1,92 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream
import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.scaladsl.{ Sink, Source }
import scala.language.implicitConversions
/**
 * See full documentation on [[SinkRef]].
 */
object SinkRef {
  /**
   * Implicitly converts a [[SinkRef]] to a [[Sink]], so a ref obtained (e.g. via actor messaging)
   * can be used directly where a `Sink` is expected. The same can be achieved by calling `.sink`
   * on the reference explicitly.
   */
  implicit def convertRefToSink[T](sinkRef: SinkRef[T]): Sink[T, NotUsed] = sinkRef.sink
}
/**
 * A [[SinkRef]] allows sharing a "reference" to a [[Sink]] with others, with the main purpose of crossing a network boundary.
 * Usually obtaining a SinkRef would be done via Actor messaging, in which one system asks a remote one,
 * to accept some data from it, and the remote one decides to accept the request to send data in a back-pressured
 * streaming fashion -- using a sink ref.
 *
 * To create a [[SinkRef]] you have to materialize the `Sink` that you want to obtain a reference to by attaching it to a `Source.sinkRef`.
 *
 * Stream refs can be seen as Reactive Streams over network boundaries.
 * See also [[akka.stream.SourceRef]] which is the dual of a `SinkRef`.
 *
 * For additional configuration see `reference.conf` as well as [[akka.stream.StreamRefAttributes]].
 */
trait SinkRef[In] {
  /** Scala API: Get the [[Sink]] underlying this sink ref. */
  def sink: Sink[In, NotUsed]

  /** Java API: Get the [[javadsl.Sink]] underlying this sink ref. */
  def getSink: javadsl.Sink[In, NotUsed]
}
/**
 * See full documentation on [[SourceRef]].
 */
object SourceRef {
  /**
   * Implicitly converts a [[SourceRef]] to a [[Source]], so a ref obtained (e.g. via actor messaging)
   * can be used directly where a `Source` is expected. The same can be achieved by calling `.source`
   * on the SourceRef itself.
   */
  implicit def convertRefToSource[T](ref: SourceRef[T]): Source[T, NotUsed] =
    ref.source
}
/**
 * A SourceRef allows sharing a "reference" with others, with the main purpose of crossing a network boundary.
 * Usually obtaining a SourceRef would be done via Actor messaging, in which one system asks a remote one,
 * to share some data with it, and the remote one decides to do so in a back-pressured streaming fashion -- using a stream ref.
 *
 * To create a [[SourceRef]] you have to materialize the `Source` that you want to obtain a reference to by attaching it to a `Sink.sourceRef`.
 *
 * Stream refs can be seen as Reactive Streams over network boundaries.
 * See also [[akka.stream.SinkRef]] which is the dual of a `SourceRef`.
 *
 * For additional configuration see `reference.conf` as well as [[akka.stream.StreamRefAttributes]].
 */
trait SourceRef[T] {
  /** Scala API: Get the [[Source]] underlying this source ref. */
  def source: Source[T, NotUsed]

  /** Java API: Get the [[javadsl.Source]] underlying this source ref. */
  def getSource: javadsl.Source[T, NotUsed]
}
// --- exceptions ---

/** Thrown when an element is pushed before the remote partner actor ref has been resolved; should never happen under correct flow control. */
final case class TargetRefNotInitializedYetException()
  extends IllegalStateException("Internal remote target actor ref not yet resolved, yet attempted to send messages to it. This should not happen due to proper flow-control, please open a ticket on the issue tracker: https://github.com/akka/akka")

/** Thrown when the remote side did not materialize a handed out stream ref within the configured subscription timeout. */
final case class StreamRefSubscriptionTimeoutException(msg: String)
  extends IllegalStateException(msg)

/** Signals that the actor backing the other side of the stream ref has terminated or reported a failure. */
final case class RemoteStreamRefActorTerminatedException(msg: String) extends RuntimeException(msg)

/** Signals a gap or reordering in the element sequence numbers exchanged between the partner actors. */
final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: Long, msg: String)
  extends IllegalStateException(s"$msg (expected: $expectedSeqNr, got: $gotSeqNr)")

/**
 * Stream refs establish a connection between a local and remote actor, representing the origin and remote sides
 * of a stream. Each such actor refers to the other side as its "partner". We make sure that no other actor than
 * the initial partner can send demand/messages to the other side accidentally.
 *
 * This exception is thrown when a message is received from a non-partner actor,
 * which could mean a bug or some actively malicious behavior from the other side.
 *
 * This is not meant as a security feature, but rather as plain sanity-check.
 */
final case class InvalidPartnerActorException(expectedRef: ActorRef, gotRef: ActorRef, msg: String)
  extends IllegalStateException(s"$msg (expected: $expectedRef, got: $gotRef)")

View file

@ -73,6 +73,9 @@ private[akka] object Buffer {
// Number of elements currently held; derived from the monotonically advancing write/read indices.
def used: Int = (writeIdx - readIdx).toInt
def isFull: Boolean = used == capacity
def nonFull: Boolean = used < capacity
// Free slots left before `isFull` becomes true.
def remainingCapacity: Int = capacity - used
def isEmpty: Boolean = used == 0
def nonEmpty: Boolean = used != 0

View file

@ -125,6 +125,7 @@ import akka.stream._
val fanoutPublisherSink = name("fanoutPublisherSink")
val ignoreSink = name("ignoreSink")
val actorRefSink = name("actorRefSink")
val futureFlattenSink = name("futureFlattenSink")
val actorRefWithAck = name("actorRefWithAckSink")
val actorSubscriberSink = name("actorSubscriberSink")
val queueSink = name("queueSink")

View file

@ -371,6 +371,90 @@ import scala.util.control.NonFatal
override def toString: String = "FutureFlattenSource"
}
// final class FutureFlattenSink[T, M](futureSink: Future[Graph[SinkShape[T], M]])
// extends GraphStageWithMaterializedValue[SinkShape[T], Future[M]] {
// ReactiveStreamsCompliance.requireNonNullElement(futureSink)
//
// val in: Inlet[T] = Inlet("FutureFlattenSink.in")
// override val shape = SinkShape(in)
//
// override def initialAttributes = DefaultAttributes.futureFlattenSink
//
// override def createLogicAndMaterializedValue(attr: Attributes): (GraphStageLogic, Future[M]) = {
// val materialized = Promise[M]()
//
// val logic = new GraphStageLogic(shape) with InHandler with OutHandler {
// private val sourceOut = new SubSourceOutlet[T]("FutureFlattenSink.out")
//
// override def preStart(): Unit =
// futureSink.value match {
// case Some(it)
// // this optimisation avoids going through any execution context, in similar vein to FastFuture
// onFutureSinkCompleted(it)
// case _
// val cb = getAsyncCallback[Try[Graph[SinkShape[T], M]]](onFutureSinkCompleted).invoke _
// futureSink.onComplete(cb)(ExecutionContexts.sameThreadExecutionContext) // could be optimised FastFuture-like
// }
//
// // initial handler (until future completes)
// setHandler(in, new InHandler {
// override def onPush(): Unit = grab(it)
//
//// def onPull(): Unit = {}
////
//// override def onDownstreamFinish(): Unit = {
//// if (!materialized.isCompleted) {
//// // we used to try to materialize the "inner" source here just to get
//// // the materialized value, but that is not safe and may cause the graph shell
//// // to leak/stay alive after the stage completes
////
//// materialized.tryFailure(new StreamDetachedException("Stream cancelled before Source Future completed"))
//// }
////
//// super.onDownstreamFinish()
//// }
//})
//
// def onPush(): Unit =
// push(out, sourceOut.grab())
//
// def onPull(): Unit =
// sourceOut.pull()
//
// override def onUpstreamFinish(): Unit =
// completeStage()
//
// override def postStop(): Unit =
// if (!sourceOut.isClosed) sourceOut.cancel()
//
// def onFutureSinkCompleted(result: Try[Graph[SinkShape[T], M]]): Unit = {
// result.map { graph
// val runnable = Sink.fromGraph(graph).toMat(sourceOut.sink)(Keep.left)
// val matVal = interpreter.subFusingMaterializer.materialize(runnable, defaultAttributes = attr)
// materialized.success(matVal)
//
// setHandler(out, this)
// sourceOut.setHandler(this)
//
// if (isAvailable(out)) {
// sourceOut.pull()
// }
//
// }.recover {
// case t
// sourceOut.cancel()
// materialized.failure(t)
// failStage(t)
// }
// }
// }
//
// (logic, materialized.future)
// }
//
// override def toString: String = "FutureFlattenSource"
// }
final class FutureSource[T](val future: Future[T]) extends GraphStage[SourceShape[T]] {
ReactiveStreamsCompliance.requireNonNullElement(future)
val shape = SourceShape(Outlet[T]("FutureSource.out"))
@ -466,3 +550,4 @@ import scala.util.control.NonFatal
}
}

View file

@ -0,0 +1,49 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import akka.NotUsed
import akka.annotation.InternalApi
import akka.stream.{ SinkRef, javadsl }
import akka.stream.scaladsl.Sink
import scala.concurrent.Future
import scala.util.{ Failure, Success }
/**
 * INTERNAL API
 * Allows users to directly use the SinkRef, even though we do have to go through the Future in order to be able
 * to materialize it, since we initialize the ref from within the GraphStageLogic.
 * Created as the materialized value of [[SourceRefStageImpl]].
 */
@InternalApi
private[akka] final case class MaterializedSinkRef[In](futureSink: Future[SinkRefImpl[In]]) extends SinkRef[In] {

  override def sink: Sink[In, NotUsed] =
    futureSink.value match {
      case Some(Success(ready)) ⇒
        // the normal case: once materialization has finished, the future is guaranteed to have been completed
        ready.sink
      case Some(Failure(_)) ⇒
        // materialization failed; there is no `Sink.failed`, so immediately cancel any upstream attached to us
        Sink.cancelled
      case None ⇒
        throw new Exception(s"This should not be possible! We guarantee to complete the materialized Future value when materialization finishes! Sink was: $futureSink")
    }

  override def getSink: javadsl.Sink[In, NotUsed] = sink.asJava

  override def toString: String =
    futureSink.value match {
      // fixed: used to say <materializing-source-ref> for a SinkRef
      case None                 ⇒ "SinkRef(<materializing-sink-ref>)"
      case Some(Success(ready)) ⇒ ready.toString
      case Some(Failure(ex))    ⇒ s"SinkRef(<failed:${ex.getMessage}>)"
    }
}

View file

@ -0,0 +1,49 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import akka.NotUsed
import akka.annotation.InternalApi
import akka.stream.scaladsl.Source
import akka.stream.{ SourceRef, javadsl }
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success }
/**
 * INTERNAL API
 * Allows users to directly use the SourceRef, even though we do have to go through the Future in order to be able
 * to materialize it, since we initialize the ref from within the GraphStageLogic.
 * Created as the materialized value of [[SinkRefStageImpl]].
 */
@InternalApi
private[akka] final case class MaterializedSourceRef[Out](futureSource: Future[SourceRefImpl[Out]]) extends SourceRef[Out] {

  override def source: Source[Out, NotUsed] =
    futureSource.value match {
      case Some(Success(ready)) ⇒
        // the normal case: once materialization has finished, the future is guaranteed to have been completed
        ready.source
      case Some(Failure(cause)) ⇒
        // materialization failed; surface the failure to whoever runs this source
        Source.failed(cause).named("SourceRef")
      case None ⇒
        throw new Exception(s"This should not be possible! We guarantee to complete the materialized Future value when materialization finishes! Source was: $futureSource")
    }

  override def getSource: javadsl.Source[Out, NotUsed] = source.asJava

  override def toString: String =
    futureSource.value match {
      case None                 ⇒ "SourceRef(<materializing-source-ref>)"
      case Some(Success(ready)) ⇒ ready.toString
      case Some(Failure(ex))    ⇒ s"SourceRef(<failed:${ex.getMessage}>)"
    }
}

View file

@ -0,0 +1,209 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import scala.language.implicitConversions
import akka.Done
import akka.NotUsed
import akka.actor.{ ActorRef, Terminated }
import akka.annotation.InternalApi
import akka.event.Logging
import akka.stream._
import akka.stream.scaladsl.Sink
import akka.stream.stage._
import akka.util.{ OptionVal, PrettyDuration }
import scala.concurrent.{ Future, Promise }
import scala.util.Try
/** INTERNAL API: Implementation class, not intended to be touched directly by end-users */
@InternalApi
private[stream] final case class SinkRefImpl[In](initialPartnerRef: ActorRef) extends SinkRef[In] {
  // Every call materializes a fresh stage pointing at the same (already known) partner actor.
  override def sink: Sink[In, NotUsed] =
    Sink.fromGraph(new SinkRefStageImpl[In](OptionVal.Some(initialPartnerRef))).mapMaterializedValue(_ ⇒ NotUsed)
  override def getSink: javadsl.Sink[In, NotUsed] = sink.asJava
}
/**
 * INTERNAL API: Actual stage implementation backing [[SinkRef]]s.
 *
 * If initialPartnerRef is set, then the remote side is already set up. If it is none, then we are the side creating
 * the ref.
 */
@InternalApi
private[stream] final class SinkRefStageImpl[In] private[akka] (
  val initialPartnerRef: OptionVal[ActorRef]
) extends GraphStageWithMaterializedValue[SinkShape[In], SourceRef[In]] {

  val in: Inlet[In] = Inlet[In](s"${Logging.simpleName(getClass)}($initialRefName).in")
  override def shape: SinkShape[In] = SinkShape.of(in)

  // only used for naming (inlet / toString), hence the placeholder for the "origin" side
  private def initialRefName: String =
    initialPartnerRef match {
      case OptionVal.Some(ref) ⇒ ref.toString
      case OptionVal.None      ⇒ "<no-initial-ref>"
    }

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
    // completed from within preStart, so it is guaranteed to be completed once materialization has finished
    val promise = Promise[SourceRefImpl[In]]

    val logic = new TimerGraphStageLogic(shape) with StageLogging with InHandler {

      private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)

      // settings ---
      import StreamRefAttributes._
      private[this] lazy val settings = ActorMaterializerHelper.downcast(materializer).settings.streamRefSettings

      // a per-stage SubscriptionTimeout attribute takes precedence over the materializer-wide setting
      private[this] lazy val subscriptionTimeout = inheritedAttributes
        .get[StreamRefAttributes.SubscriptionTimeout](SubscriptionTimeout(settings.subscriptionTimeout))
      // end of settings ---

      override protected lazy val stageActorName: String = streamRefsMaster.nextSinkRefStageName()
      private[this] var self: GraphStageLogic.StageActor = _
      implicit def selfSender: ActorRef = self.ref

      private var partnerRef: OptionVal[ActorRef] = OptionVal.None
      private def getPartnerRef: ActorRef =
        partnerRef match {
          case OptionVal.Some(ref) ⇒ ref
          case OptionVal.None      ⇒ throw TargetRefNotInitializedYetException()
        }

      val SubscriptionTimeoutTimerKey = "SubscriptionTimeoutKey"

      // demand management ---
      private var remoteCumulativeDemandReceived: Long = 0L
      private var remoteCumulativeDemandConsumed: Long = 0L
      // end of demand management ---

      // terminal signal that arrived before the partner connected; replayed to the partner once it shows up
      private var completedBeforeRemoteConnected: OptionVal[Try[Done]] = OptionVal.None

      override def preStart(): Unit = {
        self = getStageActor(initialReceive)

        if (initialPartnerRef.isDefined) // this will set the `partnerRef`
          observeAndValidateSender(initialPartnerRef.get, "Illegal initialPartnerRef! This would be a bug in the SinkRef usage or impl.")

        log.debug("Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}", initialPartnerRef, self.ref)

        promise.success(SourceRefImpl(self.ref))

        partnerRef match {
          case OptionVal.Some(ref) ⇒
            ref ! StreamRefsProtocol.OnSubscribeHandshake(self.ref)
            tryPull()
          case _ ⇒ // nothing to do
        }

        // FIX: only arm the subscription timeout while the partner is still unknown;
        // previously the timer was scheduled unconditionally and onTimer throws unconditionally,
        // which would tear down a healthy, already-connected stream once the timeout elapsed.
        if (partnerRef.isEmpty)
          scheduleOnce(SubscriptionTimeoutTimerKey, subscriptionTimeout.timeout)
      }

      lazy val initialReceive: ((ActorRef, Any)) ⇒ Unit = {
        case (_, Terminated(ref)) ⇒
          if (ref == getPartnerRef)
            failStage(RemoteStreamRefActorTerminatedException(s"Remote target receiver of data $partnerRef terminated. " +
              s"Local stream terminating, message loss (on remote side) may have happened."))

        case (sender, StreamRefsProtocol.CumulativeDemand(d)) ⇒
          // this will set the `partnerRef` on first contact (and cancel the subscription timeout)
          observeAndValidateSender(sender, "Illegal sender for CumulativeDemand")

          if (remoteCumulativeDemandReceived < d) {
            remoteCumulativeDemandReceived = d
            log.debug("Received cumulative demand [{}], consumable demand: [{}]", StreamRefsProtocol.CumulativeDemand(d), remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
          }

          tryPull()
      }

      override def onPush(): Unit = {
        val elem = grabSequenced(in)
        getPartnerRef ! elem
        log.debug("Sending sequenced: {} to {}", elem, getPartnerRef)
        tryPull()
      }

      // pull only while the remote side still has unconsumed demand
      private def tryPull() =
        if (remoteCumulativeDemandConsumed < remoteCumulativeDemandReceived && !hasBeenPulled(in)) {
          pull(in)
        }

      override protected def onTimer(timerKey: Any): Unit = timerKey match {
        case SubscriptionTimeoutTimerKey ⇒
          val ex = StreamRefSubscriptionTimeoutException(
            // we know the future has been completed by now, since it is in preStart
            s"[$stageActorName] Remote side did not subscribe (materialize) handed out Sink reference [${promise.future.value}], " +
              s"within subscription timeout: ${PrettyDuration.format(subscriptionTimeout.timeout)}!")

          throw ex // this will also log the exception, unlike failStage; this should fail rarely, but would be good to have it "loud"
      }

      // wraps the grabbed element with the next sequence number, advancing the consumed-demand counter
      private def grabSequenced[T](in: Inlet[T]): StreamRefsProtocol.SequencedOnNext[T] = {
        val onNext = StreamRefsProtocol.SequencedOnNext(remoteCumulativeDemandConsumed, grab(in))
        remoteCumulativeDemandConsumed += 1
        onNext
      }

      override def onUpstreamFailure(ex: Throwable): Unit =
        partnerRef match {
          case OptionVal.Some(ref) ⇒
            ref ! StreamRefsProtocol.RemoteStreamFailure(ex.getMessage)
            self.unwatch(getPartnerRef)
            super.onUpstreamFailure(ex)
          case _ ⇒
            completedBeforeRemoteConnected = OptionVal(scala.util.Failure(ex))
            // not terminating on purpose, since other side may subscribe still and then we want to fail it
            setKeepGoing(true)
        }

      override def onUpstreamFinish(): Unit =
        partnerRef match {
          case OptionVal.Some(ref) ⇒
            ref ! StreamRefsProtocol.RemoteStreamCompleted(remoteCumulativeDemandConsumed)
            self.unwatch(getPartnerRef)
            super.onUpstreamFinish()
          case _ ⇒
            completedBeforeRemoteConnected = OptionVal(scala.util.Success(Done))
            // not terminating on purpose, since other side may subscribe still and then we want to complete it
            setKeepGoing(true)
        }

      @throws[InvalidPartnerActorException]
      def observeAndValidateSender(partner: ActorRef, failureMsg: String): Unit = {
        if (partnerRef.isEmpty) {
          partnerRef = OptionVal(partner)
          self.watch(partner)

          // FIX: the remote side has subscribed in time, the subscription timeout must not fire anymore
          // (no-op if the timer was never scheduled, e.g. when called from preStart with an initial ref)
          cancelTimer(SubscriptionTimeoutTimerKey)

          completedBeforeRemoteConnected match {
            case OptionVal.Some(scala.util.Failure(ex)) ⇒
              log.warning("Stream already terminated with exception before remote side materialized, failing now.")
              partner ! StreamRefsProtocol.RemoteStreamFailure(ex.getMessage)
              failStage(ex)

            case OptionVal.Some(scala.util.Success(Done)) ⇒
              log.warning("Stream already completed before remote side materialized, failing now.")
              partner ! StreamRefsProtocol.RemoteStreamCompleted(remoteCumulativeDemandConsumed)
              completeStage()

            case OptionVal.None ⇒
              if (partner != getPartnerRef) {
                val ex = InvalidPartnerActorException(partner, getPartnerRef, failureMsg)
                partner ! StreamRefsProtocol.RemoteStreamFailure(ex.getMessage)
                throw ex
              } // else { ref is valid }
          }
        }
      }

      setHandler(in, this)
    }

    (logic, MaterializedSourceRef[In](promise.future))
  }

  override def toString = s"${Logging.simpleName(getClass)}($initialRefName)"
}

View file

@ -0,0 +1,235 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import akka.NotUsed
import akka.actor.{ ActorRef, Terminated }
import akka.annotation.InternalApi
import akka.event.Logging
import akka.stream._
import akka.stream.actor.{ RequestStrategy, WatermarkRequestStrategy }
import akka.stream.impl.FixedSizeBuffer
import akka.stream.scaladsl.Source
import akka.stream.stage._
import akka.util.{ OptionVal, PrettyDuration }
import scala.concurrent.Promise
import scala.language.implicitConversions
/** INTERNAL API: Implementation class, not intended to be touched directly by end-users */
@InternalApi
private[stream] final case class SourceRefImpl[T](initialPartnerRef: ActorRef) extends SourceRef[T] {
  // Every call materializes a fresh stage pointing at the same (already known) partner actor.
  def source: Source[T, NotUsed] =
    Source.fromGraph(new SourceRefStageImpl(OptionVal.Some(initialPartnerRef))).mapMaterializedValue(_ ⇒ NotUsed)
  def getSource: javadsl.Source[T, NotUsed] = source.asJava
}
/**
* INTERNAL API: Actual stage implementation backing [[SourceRef]]s.
*
* If initialPartnerRef is set, then the remote side is already set up.
* If it is none, then we are the side creating the ref.
*/
@InternalApi
private[stream] final class SourceRefStageImpl[Out](
val initialPartnerRef: OptionVal[ActorRef]
) extends GraphStageWithMaterializedValue[SourceShape[Out], SinkRef[Out]] {
val out: Outlet[Out] = Outlet[Out](s"${Logging.simpleName(getClass)}.out")
override def shape = SourceShape.of(out)
private def initialRefName =
initialPartnerRef match {
case OptionVal.Some(ref) ref.toString
case _ "<no-initial-ref>"
}
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, SinkRef[Out]) = {
val promise = Promise[SinkRefImpl[Out]]()
val logic = new TimerGraphStageLogic(shape) with StageLogging with OutHandler {
private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
// settings ---
import StreamRefAttributes._
private[this] lazy val settings = ActorMaterializerHelper.downcast(materializer).settings.streamRefSettings
private[this] lazy val subscriptionTimeout = inheritedAttributes
.get[StreamRefAttributes.SubscriptionTimeout](SubscriptionTimeout(settings.subscriptionTimeout))
// end of settings ---
override protected lazy val stageActorName: String = streamRefsMaster.nextSourceRefStageName()
private[this] var self: GraphStageLogic.StageActor = _
private[this] implicit def selfSender: ActorRef = self.ref
val SubscriptionTimeoutTimerKey = "SubscriptionTimeoutKey"
val DemandRedeliveryTimerKey = "DemandRedeliveryTimerKey"
// demand management ---
private var completed = false
private var expectingSeqNr: Long = 0L
private var localCumulativeDemand: Long = 0L
private var localRemainingRequested: Int = 0
private var receiveBuffer: FixedSizeBuffer.FixedSizeBuffer[Out] = _ // initialized in preStart since depends on settings
private var requestStrategy: RequestStrategy = _ // initialized in preStart since depends on receiveBuffer's size
// end of demand management ---
// initialized with the originRef if present, that means we're the "remote" for an already active Source on the other side (the "origin")
// null otherwise, in which case we allocated first -- we are the "origin", and awaiting the other side to start when we'll receive this ref
private var partnerRef: OptionVal[ActorRef] = OptionVal.None
private def getPartnerRef = partnerRef.get
override def preStart(): Unit = {
receiveBuffer = FixedSizeBuffer[Out](settings.bufferCapacity)
requestStrategy = WatermarkRequestStrategy(highWatermark = receiveBuffer.capacity)
self = getStageActor(initialReceive)
log.debug("[{}] Allocated receiver: {}", stageActorName, self.ref)
if (initialPartnerRef.isDefined) // this will set the partnerRef
observeAndValidateSender(initialPartnerRef.get, "<no error case here, definitely valid>")
promise.success(SinkRefImpl(self.ref))
scheduleOnce(SubscriptionTimeoutTimerKey, subscriptionTimeout.timeout)
}
override def onPull(): Unit = {
tryPush()
triggerCumulativeDemand()
}
def triggerCumulativeDemand(): Unit = {
val i = receiveBuffer.remainingCapacity - localRemainingRequested
if (partnerRef.isDefined && i > 0) {
val addDemand = requestStrategy.requestDemand(receiveBuffer.used + localRemainingRequested)
// only if demand has increased we shoot it right away
// otherwise it's the same demand level, so it'd be triggered via redelivery anyway
if (addDemand > 0) {
localCumulativeDemand += addDemand
localRemainingRequested += addDemand
val demand = StreamRefsProtocol.CumulativeDemand(localCumulativeDemand)
log.debug("[{}] Demanding until [{}] (+{})", stageActorName, localCumulativeDemand, addDemand)
getPartnerRef ! demand
scheduleDemandRedelivery()
}
}
}
def scheduleDemandRedelivery() =
scheduleOnce(DemandRedeliveryTimerKey, settings.demandRedeliveryInterval)
override protected def onTimer(timerKey: Any): Unit = timerKey match {
case SubscriptionTimeoutTimerKey
val ex = StreamRefSubscriptionTimeoutException(
// we know the future has been competed by now, since it is in preStart
s"[$stageActorName] Remote side did not subscribe (materialize) handed out Sink reference [${promise.future.value}]," +
s"within subscription timeout: ${PrettyDuration.format(subscriptionTimeout.timeout)}!")
throw ex // this will also log the exception, unlike failStage; this should fail rarely, but would be good to have it "loud"
case DemandRedeliveryTimerKey
log.debug("[{}] Scheduled re-delivery of demand until [{}]", stageActorName, localCumulativeDemand)
getPartnerRef ! StreamRefsProtocol.CumulativeDemand(localCumulativeDemand)
scheduleDemandRedelivery()
}
lazy val initialReceive: ((ActorRef, Any)) Unit = {
case (sender, msg @ StreamRefsProtocol.OnSubscribeHandshake(remoteRef))
cancelTimer("SubscriptionTimeoutTimerKey")
observeAndValidateSender(remoteRef, "Illegal sender in SequencedOnNext")
log.debug("[{}] Received handshake {} from {}", stageActorName, msg, sender)
triggerCumulativeDemand()
case (sender, msg @ StreamRefsProtocol.SequencedOnNext(seqNr, payload: Out @unchecked))
observeAndValidateSender(sender, "Illegal sender in SequencedOnNext")
observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in SequencedOnNext")
log.debug("[{}] Received seq {} from {}", stageActorName, msg, sender)
onReceiveElement(payload)
triggerCumulativeDemand()
case (sender, StreamRefsProtocol.RemoteStreamCompleted(seqNr))
observeAndValidateSender(sender, "Illegal sender in RemoteSinkCompleted")
observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in RemoteSinkCompleted")
log.debug("[{}] The remote stream has completed, completing as well...", stageActorName)
self.unwatch(sender)
completed = true
tryPush()
case (sender, StreamRefsProtocol.RemoteStreamFailure(reason))
observeAndValidateSender(sender, "Illegal sender in RemoteSinkFailure")
log.warning("[{}] The remote stream has failed, failing (reason: {})", stageActorName, reason)
self.unwatch(sender)
failStage(RemoteStreamRefActorTerminatedException(s"Remote stream (${sender.path}) failed, reason: $reason"))
case (_, Terminated(ref))
partnerRef match {
case OptionVal.Some(`ref`)
failStage(RemoteStreamRefActorTerminatedException(s"The remote partner $ref has terminated! " +
s"Tearing down this side of the stream as well."))
case _
// this should not have happened! It should be impossible that we watched some other actor
failStage(RemoteStreamRefActorTerminatedException(s"Received UNEXPECTED Terminated($ref) message! " +
s"This actor was NOT our trusted remote partner, which was: $getPartnerRef. Tearing down."))
}
}
/**
 * Pushes one buffered element downstream if any is queued; once the buffer has
 * drained AND the remote side has signalled completion, completes the stage.
 */
def tryPush(): Unit =
  if (receiveBuffer.isEmpty) {
    // nothing left to deliver; only now is it safe to propagate remote completion
    if (completed) completeStage()
  } else {
    push(out, receiveBuffer.dequeue())
  }
/**
 * Handles one element received from the remote partner: accounts for one unit of
 * consumed demand, then either pushes straight downstream (fast path) or buffers.
 */
private def onReceiveElement(payload: Out): Unit = {
  localRemainingRequested -= 1 // one unit of previously signalled demand is now used up
  if (receiveBuffer.isEmpty && isAvailable(out))
    push(out, payload) // fast path: downstream is waiting and nothing is queued ahead of this element
  else if (receiveBuffer.isFull)
    // demand accounting should make overflow impossible; reaching this line indicates a bug
    throw new IllegalStateException(s"Attempted to overflow buffer! Capacity: ${receiveBuffer.capacity}, incoming element: $payload, localRemainingRequested: ${localRemainingRequested}, localCumulativeDemand: ${localCumulativeDemand}")
  else
    receiveBuffer.enqueue(payload)
}
/**
 * Records the first observed sender as the trusted remote partner (and death-watches it);
 * afterwards rejects messages arriving from any other actor.
 *
 * This is a sanity check against crossed wires between stream refs, not a security feature.
 *
 * @throws InvalidPartnerActorException when partner ref is invalid
 */
def observeAndValidateSender(partner: ActorRef, msg: String): Unit =
  partnerRef match {
    case OptionVal.None ⇒
      // first contact: remember the partner and watch it so we notice if it terminates
      log.debug("Received first message from {}, assuming it to be the remote partner for this stage", partner)
      partnerRef = OptionVal(partner)
      self.watch(partner)
    case OptionVal.Some(ref) ⇒
      if (partner != ref) {
        // notify the unexpected sender that this side considers the stream failed, then blow up locally
        val ex = InvalidPartnerActorException(partner, getPartnerRef, msg)
        partner ! StreamRefsProtocol.RemoteStreamFailure(ex.getMessage)
        throw ex
      } // else, ref is valid and we don't need to do anything with it
  }
/**
 * Validates that the observed sequence number is exactly the expected one and
 * advances the expectation; protocol messages are not re-delivered, so a gap is fatal.
 *
 * @throws InvalidSequenceNumberException when the observed sequence number is invalid
 */
def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
  if (!isInvalidSequenceNr(seqNr)) expectingSeqNr += 1
  else throw InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)

/** True when `seqNr` is not the next expected sequence number. */
def isInvalidSequenceNr(seqNr: Long): Boolean =
  expectingSeqNr != seqNr
setHandler(out, this)
}
(logic, MaterializedSinkRef[Out](promise.future))
}
// FIX(review): removed the stray '}' that trailed the closing parenthesis,
// which rendered as e.g. "SourceRefStageImpl(name)}" with unbalanced braces.
override def toString: String =
  s"${Logging.simpleName(getClass)}($initialRefName)"
}

View file

@ -0,0 +1,25 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import akka.annotation.InternalApi
import akka.event.Logging
import akka.stream.StreamRefSettings
import scala.concurrent.duration.FiniteDuration
/** INTERNAL API */
@InternalApi
private[akka] final case class StreamRefSettingsImpl private (
  override val bufferCapacity: Int,
  override val demandRedeliveryInterval: FiniteDuration,
  override val subscriptionTimeout: FiniteDuration
) extends StreamRefSettings {

  // Immutable "with-ers": each returns a copy with a single setting replaced.
  override def withBufferCapacity(value: Int): StreamRefSettings = copy(bufferCapacity = value)
  override def withDemandRedeliveryInterval(value: scala.concurrent.duration.FiniteDuration): StreamRefSettings = copy(demandRedeliveryInterval = value)
  override def withSubscriptionTimeout(value: FiniteDuration): StreamRefSettings = copy(subscriptionTimeout = value)

  // Render as the public interface name ("StreamRefSettings(...)") rather than the impl class name.
  override def productPrefix: String = Logging.simpleName(classOf[StreamRefSettings])
}

View file

@ -0,0 +1,37 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.annotation.InternalApi
import akka.stream.impl.SeqActorName
/** INTERNAL API */
@InternalApi
private[stream] object StreamRefsMaster extends ExtensionId[StreamRefsMaster] with ExtensionIdProvider {
  // Standard Akka extension plumbing: one StreamRefsMaster per actor system.
  override def createExtension(system: ExtendedActorSystem): StreamRefsMaster =
    new StreamRefsMaster(system)

  override def lookup(): StreamRefsMaster.type = this

  override def get(system: ActorSystem): StreamRefsMaster = super.get(system)
}
/**
 * INTERNAL API
 *
 * Per-system extension handing out unique, sequence-numbered names for the stage
 * actors backing stream refs.
 */
@InternalApi
private[stream] final class StreamRefsMaster(system: ExtendedActorSystem) extends Extension {

  private[this] val sourceRefNames = SeqActorName("SourceRef") // "local target"
  private[this] val sinkRefNames = SeqActorName("SinkRef") // "remote sender"

  // TODO introduce a master with which all stages running the streams register themselves?

  /** Next unique name for a SourceRef stage actor. */
  def nextSourceRefStageName(): String = sourceRefNames.next()

  /** Next unique name for a SinkRef stage actor. */
  def nextSinkRefStageName(): String = sinkRefNames.next()
}

View file

@ -0,0 +1,57 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.impl.streamref
import akka.actor.{ ActorRef, DeadLetterSuppression }
import akka.annotation.InternalApi
import akka.stream.impl.ReactiveStreamsCompliance
/** INTERNAL API */
@InternalApi
private[akka] sealed trait StreamRefsProtocol

/** INTERNAL API */
@InternalApi
private[akka] object StreamRefsProtocol {

  /**
   * Sequenced `Subscriber#onNext` equivalent.
   * The receiving end of these messages MUST fail the stream if it observes gaps in the sequence,
   * as these messages will not be re-delivered.
   *
   * Sequence numbers start from `0`.
   */
  @InternalApi
  private[akka] final case class SequencedOnNext[T](seqNr: Long, payload: T) extends StreamRefsProtocol with DeadLetterSuppression {
    // Reactive Streams spec rule 2.13: onNext must never carry null
    if (payload == null) throw ReactiveStreamsCompliance.elementMustNotBeNullException
  }

  /**
   * INTERNAL API: Initial message sent to remote side to establish partnership between origin and remote stream refs.
   */
  @InternalApi
  private[akka] final case class OnSubscribeHandshake(targetRef: ActorRef) extends StreamRefsProtocol with DeadLetterSuppression

  /**
   * INTERNAL API: Sent to the receiver side of a stream ref, once the sending side of the SinkRef gets signalled a Failure.
   */
  @InternalApi
  private[akka] final case class RemoteStreamFailure(msg: String) extends StreamRefsProtocol

  /**
   * INTERNAL API: Sent to the receiver side of a stream ref, once the sending side of the SinkRef gets signalled a completion.
   */
  @InternalApi
  private[akka] final case class RemoteStreamCompleted(seqNr: Long) extends StreamRefsProtocol

  /**
   * INTERNAL API: Cumulative demand, equivalent to sequence numbering all events in a stream.
   *
   * This message may be re-delivered.
   */
  @InternalApi
  private[akka] final case class CumulativeDemand(seqNr: Long) extends StreamRefsProtocol with DeadLetterSuppression {
    // Reactive Streams spec rule 3.9: demand must be strictly positive
    if (seqNr <= 0) throw ReactiveStreamsCompliance.numberOfElementsInRequestMustBePositiveException
  }
}

View file

@ -278,6 +278,18 @@ object Sink {
new Sink(scaladsl.Sink.lazyInit[T, M](
t sinkFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext),
() fallback.create()).mapMaterializedValue(_.toJava))
/**
 * Java API: A local [[Sink]] which materializes a [[SourceRef]] which can be used by other streams (including remote ones),
 * to consume data from this local stream, as if they were attached in the spot of the local Sink directly.
 *
 * Adheres to [[StreamRefAttributes]].
 *
 * See more detailed documentation on [[SourceRef]].
 */
def sourceRef[T](): javadsl.Sink[T, SourceRef[T]] =
  scaladsl.Sink.sourceRef[T]().asJava // delegate to the Scala DSL implementation, adapting only the DSL type
}
/**

View file

@ -25,6 +25,8 @@ import scala.compat.java8.OptionConverters._
import java.util.concurrent.CompletionStage
import java.util.concurrent.CompletableFuture
import akka.stream.scaladsl.Sink
import scala.compat.java8.FutureConverters._
/** Java API */
@ -442,6 +444,17 @@ object Source {
() create.create().toScala,
(s: S) read.apply(s).toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext),
(s: S) close.apply(s).toScala))
/**
 * Java API: A local [[Source]] which materializes a [[SinkRef]] which can be used by other streams (including remote ones),
 * to publish data into this local stream, as if they were attached in the spot of the local Source directly.
 *
 * Adheres to [[StreamRefAttributes]].
 *
 * See more detailed documentation on [[SinkRef]].
 */
def sinkRef[T](): javadsl.Source[T, SinkRef[T]] =
  scaladsl.Source.sinkRef[T]().asJava // delegate to the Scala DSL implementation, adapting only the DSL type
}
/**

View file

@ -1,67 +0,0 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote
import akka.actor.{ ActorRef, DeadLetterSuppression }
import akka.annotation.InternalApi
import akka.stream.impl.ReactiveStreamsCompliance
/** INTERNAL API: Protocol messages used by the various stream-ref implementations. */
@InternalApi
private[akka] object StreamRefs {

  @InternalApi
  sealed trait Protocol

  /**
   * Sequenced `Subscriber#onNext` equivalent.
   * The receiving end of these messages MUST fail the stream if it observes gaps in the sequence,
   * as these messages will not be re-delivered.
   *
   * Sequence numbers start from `0`.
   */
  @InternalApi
  final case class SequencedOnNext[T](seqNr: Long, payload: T) extends StreamRefs.Protocol {
    if (payload == null) throw ReactiveStreamsCompliance.elementMustNotBeNullException
  }

  /** Sent to the receiver side of a SinkRef, once the sending side of the SinkRef gets signalled a Failure. */
  @InternalApi
  final case class RemoteSinkFailure(msg: String) extends StreamRefs.Protocol

  /** Sent to the receiver side of a SinkRef, once the sending side of the SinkRef gets signalled a completion. */
  @InternalApi
  final case class RemoteSinkCompleted(seqNr: Long) extends StreamRefs.Protocol

  /**
   * Cumulative demand, equivalent to sequence numbering all events in a stream.
   *
   * This message may be re-delivered.
   */
  @InternalApi
  final case class CumulativeDemand(seqNr: Long) extends StreamRefs.Protocol with DeadLetterSuppression {
    if (seqNr <= 0) throw ReactiveStreamsCompliance.numberOfElementsInRequestMustBePositiveException
  }

  // --- exceptions ---

  final case class RemoteStreamRefActorTerminatedException(msg: String) extends RuntimeException(msg)
  final case class RemoteStreamRefFailedException(msg: String) extends RuntimeException(msg)

  final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: Long, msg: String)
    extends IllegalStateException(s"$msg (expected: $expectedSeqNr, got: $gotSeqNr)")

  /**
   * Stream refs establish a connection between a local and remote actor, representing the origin and remote sides
   * of a stream. Each such actor refers to the other side as its "partner". We make sure that no other actor than
   * the initial partner can send demand/messages to the other side accidentally.
   *
   * This exception is thrown when a message is received from a non-partner actor,
   * which could mean a bug or some actively malicious behaviour from the other side.
   *
   * This is not meant as a security feature, but rather as plain sanity-check.
   */
  final case class InvalidPartnerActorException(expectedRef: ActorRef, gotRef: ActorRef, msg: String)
    extends IllegalStateException(s"$msg (expected: $expectedRef, got: $gotRef)")
}

View file

@ -1,61 +0,0 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote.impl
import akka.actor.{ Actor, ActorRef, ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props }
import akka.stream.ActorMaterializerHelper
import akka.stream.impl.SeqActorName
import akka.stream.remote.impl.StreamRefsMasterActor.AllocatePusherToRemoteSink
import akka.stream.remote.scaladsl.{ SinkRef, StreamRefSettings }
// NOTE(review): this file is removed by this commit; comments added for historical clarity only.
object StreamRefsMaster extends ExtensionId[StreamRefsMaster] with ExtensionIdProvider {
  // Standard Akka extension plumbing: one StreamRefsMaster per actor system.
  override def createExtension(system: ExtendedActorSystem): StreamRefsMaster =
    new StreamRefsMaster(system)

  override def lookup(): StreamRefsMaster.type = this

  override def get(system: ActorSystem): StreamRefsMaster = super.get(system)
}

/** INTERNAL API */
private[stream] final class StreamRefsMaster(system: ExtendedActorSystem) extends Extension {

  // Settings read once from the system's config (akka.stream.stream-refs).
  val settings: StreamRefSettings = new StreamRefSettings(system.settings.config)

  // Sequence-numbered name generators for the four stage-actor flavours.
  private[this] val sourceRefOriginSinkNames = SeqActorName("SourceRefOriginSink") // "local origin"
  private[this] val sourceRefNames = SeqActorName("SourceRef") // "remote receiver"
  private[this] val sinkRefTargetSourceNames = SeqActorName("SinkRefTargetSource") // "local target"
  private[this] val sinkRefNames = SeqActorName("SinkRef") // "remote sender"

  // TODO do we need it? perhaps for reaping?
  // system.systemActorOf(StreamRefsMasterActor.props(), "streamRefsMaster")

  def nextSinkRefTargetSourceName(): String =
    sinkRefTargetSourceNames.next()

  def nextSinkRefName(): String =
    sinkRefNames.next()

  def nextSourceRefOriginSinkName(): String =
    sourceRefOriginSinkNames.next()

  def nextSourceRefName(): String =
    sourceRefNames.next()
}

object StreamRefsMasterActor {
  def props(): Props = Props(new StreamRefsMasterActor())

  final case class AllocatePusherToRemoteSink(stageRef: ActorRef)
}

// NOTE(review): stub actor — receives AllocatePusherToRemoteSink but does nothing yet.
class StreamRefsMasterActor extends Actor {
  override def receive: Receive = {
    case AllocatePusherToRemoteSink(stageRef) ⇒
    // context.actorOf()
  }
}

View file

@ -1,274 +0,0 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote.scaladsl
import java.util.Queue
import akka.actor.{ ActorRef, Terminated }
import akka.event.Logging
import akka.stream._
import akka.stream.actor.{ MaxInFlightRequestStrategy, RequestStrategy, WatermarkRequestStrategy }
import akka.stream.impl.FixedSizeBuffer
import akka.stream.remote.StreamRefs
import akka.stream.remote.impl.StreamRefsMaster
import akka.stream.scaladsl.Source
import akka.stream.stage._
import akka.util.ByteString
import scala.concurrent.{ Future, Promise }
object SinkRef {
  /** Materializes a Source together with a (future) SinkRef that a remote system can use to push data into it. */
  def source[T](): Source[T, Future[SinkRef[T]]] =
    Source.fromGraph(new SinkRefTargetSource[T]()) // TODO settings?

  // Unimplemented sketch of a bulk (ByteString) transfer variant.
  def bulkTransferSource(port: Int = -1): Source[ByteString, SinkRef[ByteString]] = {
    ???
  }
}
/**
* This stage can only handle a single "sender" (it does not merge values);
* The first that pushes is assumed the one we are to trust
*/
final class SinkRefTargetSource[T]() extends GraphStageWithMaterializedValue[SourceShape[T], Future[SinkRef[T]]] {
val out: Outlet[T] = Outlet[T](s"${Logging.simpleName(getClass)}.out")
override def shape = SourceShape.of(out)
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
val promise = Promise[SinkRef[T]]()
val logic = new TimerGraphStageLogic(shape) with StageLogging with OutHandler {
private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
private[this] lazy val settings = streamRefsMaster.settings
private[this] var self: GraphStageLogic.StageActor = _
private[this] lazy val selfActorName = streamRefsMaster.nextSinkRefTargetSourceName()
private[this] implicit def selfSender: ActorRef = self.ref
// demand management ---
private val highDemandWatermark = 16
private var expectingSeqNr: Long = 0L
private var localCumulativeDemand: Long = 0L // initialized in preStart with settings.initialDemand
private val receiveBuffer = FixedSizeBuffer[T](highDemandWatermark)
// TODO configurable?
// Request strategies talk in terms of Request(n), which we need to translate to cumulative demand
// TODO the MaxInFlightRequestStrategy is likely better for this use case, yet was a bit weird to use so this one for now
private val requestStrategy: RequestStrategy = WatermarkRequestStrategy(highWatermark = highDemandWatermark)
// end of demand management ---
private var remotePartner: ActorRef = _
override def preStart(): Unit = {
localCumulativeDemand = settings.initialDemand.toLong
self = getStageActor(initialReceive)
log.warning("Allocated receiver: {}", self.ref)
promise.success(new SinkRef(self.ref, settings.initialDemand))
}
override def onPull(): Unit = {
tryPush()
triggerCumulativeDemand()
}
def triggerCumulativeDemand(): Unit =
if (remotePartner ne null) {
val remainingRequested = java.lang.Long.min(highDemandWatermark, localCumulativeDemand - expectingSeqNr).toInt
val addDemand = requestStrategy.requestDemand(remainingRequested)
// only if demand has increased we shoot it right away
// otherwise it's the same demand level, so it'd be triggered via redelivery anyway
if (addDemand > 0) {
localCumulativeDemand += addDemand
val demand = StreamRefs.CumulativeDemand(localCumulativeDemand)
log.warning("[{}] Demanding until [{}] (+{})", selfActorName, localCumulativeDemand, addDemand)
remotePartner ! demand
scheduleDemandRedelivery()
}
}
val DemandRedeliveryTimerKey = "DemandRedeliveryTimerKey"
def scheduleDemandRedelivery() = scheduleOnce(DemandRedeliveryTimerKey, settings.demandRedeliveryInterval)
override protected def onTimer(timerKey: Any): Unit = timerKey match {
case DemandRedeliveryTimerKey
log.debug("[{}] Scheduled re-delivery of demand until [{}]", selfActorName, localCumulativeDemand)
remotePartner ! StreamRefs.CumulativeDemand(localCumulativeDemand)
scheduleDemandRedelivery()
}
lazy val initialReceive: ((ActorRef, Any)) Unit = {
case (sender, msg @ StreamRefs.SequencedOnNext(seqNr, payload))
observeAndValidateSender(sender, "Illegal sender in SequencedOnNext")
observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in SequencedOnNext")
log.warning("Received seq {} from {}", msg, sender)
triggerCumulativeDemand()
tryPush(payload)
case (sender, StreamRefs.RemoteSinkCompleted(seqNr))
observeAndValidateSender(sender, "Illegal sender in RemoteSinkCompleted")
observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in RemoteSinkCompleted")
log.debug("The remote Sink has completed, completing this source as well...")
self.unwatch(sender)
completeStage()
case (sender, StreamRefs.RemoteSinkFailure(reason))
observeAndValidateSender(sender, "Illegal sender in RemoteSinkFailure")
log.debug("The remote Sink has failed, failing (reason: {})", reason)
self.unwatch(sender)
failStage(StreamRefs.RemoteStreamRefActorTerminatedException(s"Remote Sink failed, reason: $reason"))
}
def tryPush(): Unit =
if (isAvailable(out) && receiveBuffer.nonEmpty) {
val elem = receiveBuffer.dequeue()
log.warning(s"PUSHING SIGNALED ${elem} (capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
push(out, elem)
}
def tryPush(payload: Any): Unit =
if (isAvailable(out)) {
if (receiveBuffer.nonEmpty) {
val elem = receiveBuffer.dequeue()
push(out, elem)
receiveBuffer.enqueue(payload.asInstanceOf[T])
log.warning(s"PUSHING SIGNALED ${elem} BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
} else {
push(out, payload.asInstanceOf[T])
log.warning(s"PUSHING DIRECTLY ${payload}")
}
} else {
receiveBuffer.enqueue(payload.asInstanceOf[T])
log.warning("PUSHING BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
}
@throws[StreamRefs.InvalidPartnerActorException]
def observeAndValidateSender(sender: ActorRef, msg: String): Unit =
if (remotePartner == null) {
log.debug("Received first message from {}, assuming it to be the remote partner for this stage", sender)
remotePartner = sender
self.watch(sender)
} else if (sender != remotePartner) {
throw StreamRefs.InvalidPartnerActorException(sender, remotePartner, msg)
}
@throws[StreamRefs.InvalidSequenceNumberException]
def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
if (isInvalidSequenceNr(seqNr)) {
throw StreamRefs.InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)
} else {
expectingSeqNr += 1
}
def isInvalidSequenceNr(seqNr: Long): Boolean =
seqNr != expectingSeqNr
setHandler(out, this)
}
(logic, promise.future) // FIXME we'd want to expose just the ref!
}
override def toString: String =
s"${Logging.simpleName(getClass)}()}"
}
/**
* The "handed out" side of a SinkRef. It powers a Source on the other side.
* TODO naming!??!?!!?!?!?!
*
* Do not create this instance directly, but use `SinkRef` factories, to run/setup its targetRef
*/
final class SinkRef[In] private[akka] ( // TODO is it more of a SourceRefSink?
private[akka] val targetRef: ActorRef,
private[akka] val initialDemand: Long
) extends GraphStage[SinkShape[In]] with Serializable { stage
import akka.stream.remote.StreamRefs._
val in = Inlet[In](s"${Logging.simpleName(getClass)}($targetRef).in")
override def shape: SinkShape[In] = SinkShape.of(in)
override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with StageLogging with InHandler {
private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
private[this] override lazy val stageActorName = streamRefsMaster.nextSinkRefName()
// we assume that there is at least SOME buffer space
private[this] var remoteCumulativeDemandReceived = initialDemand
// FIXME this one will be sent over remoting so we have to be able to make that work
private[this] var remoteCumulativeDemandConsumed = 0L
private[this] var self: GraphStageLogic.StageActor = _
implicit def selfSender: ActorRef = self.ref
override def preStart(): Unit = {
self = getStageActor(initialReceive)
self.watch(targetRef)
log.warning("Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}", targetRef, self)
pull(in)
}
lazy val initialReceive: ((ActorRef, Any)) Unit = {
case (_, Terminated(`targetRef`))
failStage(failRemoteTerminated())
case (sender, CumulativeDemand(d))
validatePartnerRef(sender)
if (remoteCumulativeDemandReceived < d) {
remoteCumulativeDemandReceived = d
log.warning("Received cumulative demand [{}], consumable demand: [{}]", CumulativeDemand(d), remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
}
tryPull()
}
override def onPush(): Unit = {
val elem = grabSequenced(in)
targetRef ! elem
log.warning("Sending sequenced: {} to {}", elem, targetRef)
tryPull()
}
private def tryPull() =
if (remoteCumulativeDemandConsumed < remoteCumulativeDemandReceived && !hasBeenPulled(in))
pull(in)
private def grabSequenced[T](in: Inlet[T]): SequencedOnNext[T] = {
val onNext = SequencedOnNext(remoteCumulativeDemandConsumed, grab(in))
remoteCumulativeDemandConsumed += 1
onNext
}
override def onUpstreamFailure(ex: Throwable): Unit = {
targetRef ! StreamRefs.RemoteSinkFailure(ex.getMessage) // TODO yes / no? At least the message I guess
self.unwatch(targetRef)
super.onUpstreamFailure(ex)
}
override def onUpstreamFinish(): Unit = {
targetRef ! StreamRefs.RemoteSinkCompleted(remoteCumulativeDemandConsumed)
self.unwatch(targetRef)
super.onUpstreamFinish()
}
setHandler(in, this)
}
private def validatePartnerRef(ref: ActorRef) = {
if (ref != targetRef) throw new RuntimeException("Got demand from weird actor! Not the one I trust hmmmm!!!")
}
private def failRemoteTerminated() = {
RemoteStreamRefActorTerminatedException(s"Remote target receiver of data ${targetRef} terminated. Local stream terminating, message loss (on remote side) may have happened.")
}
override def toString = s"${Logging.simpleName(getClass)}($targetRef)"
}

View file

@ -1,289 +0,0 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote.scaladsl
import akka.NotUsed
import akka.actor.ActorRef
import akka.event.Logging
import akka.stream._
import akka.stream.actor.{ RequestStrategy, WatermarkRequestStrategy }
import akka.stream.impl.FixedSizeBuffer
import akka.stream.remote.StreamRefs
import akka.stream.remote.StreamRefs.{ CumulativeDemand, SequencedOnNext }
import akka.stream.remote.impl.StreamRefsMaster
import akka.stream.scaladsl.{ FlowOps, Sink, Source }
import akka.stream.stage._
import akka.util.ByteString
import scala.concurrent.{ Future, Promise }
// FIXME IMPLEMENT THIS
object SourceRef {
  /** Materializes a Sink together with a (future) SourceRef that a remote system can use to consume data from it. */
  def sink[T](): Graph[SinkShape[T], Future[SourceRef[T]]] =
    Sink.fromGraph(new SourceRefOriginSink[T]())

  // Unimplemented sketch of a bulk (ByteString) transfer variant.
  def bulkTransfer[T](): Graph[SinkShape[ByteString], SourceRef[ByteString]] = ???
}
final class SourceRefOriginSink[T]() extends GraphStageWithMaterializedValue[SinkShape[T], Future[SourceRef[T]]] {
val in: Inlet[T] = Inlet[T](s"${Logging.simpleName(getClass)}.in")
override def shape: SinkShape[T] = SinkShape.of(in)
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[SourceRef[T]]) = {
val promise = Promise[SourceRef[T]]
val logic = new TimerGraphStageLogic(shape) with InHandler with StageLogging {
private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
private[this] lazy val settings = streamRefsMaster.settings
private[this] var remotePartner: ActorRef = _
private[this] var self: GraphStageLogic.StageActor = _
private[this] lazy val selfActorName = streamRefsMaster.nextSinkRefTargetSourceName()
private[this] implicit def selfSender: ActorRef = self.ref
// demand management ---
private var remoteCumulativeDemandReceived: Long = 0L
private var remoteCumulativeDemandConsumed: Long = 0L
// end of demand management ---
override def preStart(): Unit = {
self = getStageActor(initialReceive)
log.warning("Allocated receiver: {}", self.ref)
promise.success(new SourceRef(self.ref))
}
lazy val initialReceive: ((ActorRef, Any)) Unit = {
case (sender, msg @ StreamRefs.CumulativeDemand(demand))
observeAndValidateSender(sender, "Illegal sender in CumulativeDemand")
if (demand > remoteCumulativeDemandReceived) {
remoteCumulativeDemandReceived = demand
log.warning("Received cumulative demand [{}], consumable demand: [{}]", msg,
remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
}
tryPull()
}
def tryPull(): Unit =
if (remoteCumulativeDemandConsumed < remoteCumulativeDemandReceived && !hasBeenPulled(in))
pull(in)
private def grabSequenced(in: Inlet[T]): SequencedOnNext[T] = {
val onNext = SequencedOnNext(remoteCumulativeDemandConsumed, grab(in))
remoteCumulativeDemandConsumed += 1
onNext
}
override def onPush(): Unit = {
val elem = grabSequenced(in)
remotePartner ! elem
log.warning("Sending sequenced: {} to {}", elem, remotePartner)
tryPull()
}
@throws[StreamRefs.InvalidPartnerActorException]
def observeAndValidateSender(sender: ActorRef, msg: String): Unit =
if (remotePartner == null) {
log.debug("Received first message from {}, assuming it to be the remote partner for this stage", sender)
remotePartner = sender
self.watch(sender)
} else if (sender != remotePartner) {
throw StreamRefs.InvalidPartnerActorException(sender, remotePartner, msg)
}
// @throws[StreamRefs.InvalidSequenceNumberException]
// def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
// if (isInvalidSequenceNr(seqNr)) {
// throw StreamRefs.InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)
// } else {
// expectingSeqNr += 1
// }
// def isInvalidSequenceNr(seqNr: Long): Boolean =
// seqNr != expectingSeqNr
override def onUpstreamFailure(ex: Throwable): Unit = {
remotePartner ! StreamRefs.RemoteSinkFailure(ex.getMessage) // TODO yes / no? At least the message I guess
self.unwatch(remotePartner)
super.onUpstreamFailure(ex)
}
override def onUpstreamFinish(): Unit = {
remotePartner ! StreamRefs.RemoteSinkCompleted(remoteCumulativeDemandConsumed)
self.unwatch(remotePartner)
super.onUpstreamFinish()
}
setHandler(in, this)
}
(logic, promise.future)
}
}
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
/**
* This stage can only handle a single "sender" (it does not merge values);
* The first that pushes is assumed the one we are to trust
*/
// FIXME this is basically SinkRefTargetSource
final class SourceRef[T](private[akka] val originRef: ActorRef) extends GraphStageWithMaterializedValue[SourceShape[T], Future[SinkRef[T]]] {
val out: Outlet[T] = Outlet[T](s"${Logging.simpleName(getClass)}.out")
override def shape = SourceShape.of(out)
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
val promise = Promise[SinkRef[T]]()
val logic = new TimerGraphStageLogic(shape) with StageLogging with OutHandler {
private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
private[this] lazy val settings = streamRefsMaster.settings
private[this] var self: GraphStageLogic.StageActor = _
private[this] override lazy val stageActorName = streamRefsMaster.nextSinkRefTargetSourceName()
private[this] implicit def selfSender: ActorRef = self.ref
// demand management ---
private val highDemandWatermark = 16
private var expectingSeqNr: Long = 0L
private var localCumulativeDemand: Long = 0L // initialized in preStart with settings.initialDemand
private val receiveBuffer = FixedSizeBuffer[T](highDemandWatermark)
// TODO configurable?
// Request strategies talk in terms of Request(n), which we need to translate to cumulative demand
// TODO the MaxInFlightRequestStrategy is likely better for this use case, yet was a bit weird to use so this one for now
private val requestStrategy: RequestStrategy = WatermarkRequestStrategy(highWatermark = highDemandWatermark)
// end of demand management ---
// TODO we could basically use the other impl... and just pass null as originRef since it'd be obtained from other side...
private var remotePartner: ActorRef = originRef
override def preStart(): Unit = {
localCumulativeDemand = settings.initialDemand.toLong
self = getStageActor(initialReceive)
log.warning("Allocated receiver: {}", self.ref)
promise.success(new SinkRef(self.ref, settings.initialDemand))
}
override def onPull(): Unit = {
tryPush()
triggerCumulativeDemand()
}
def triggerCumulativeDemand(): Unit =
if (remotePartner ne null) {
val remainingRequested = java.lang.Long.min(highDemandWatermark, localCumulativeDemand - expectingSeqNr).toInt
val addDemand = requestStrategy.requestDemand(remainingRequested)
// only if demand has increased we shoot it right away
// otherwise it's the same demand level, so it'd be triggered via redelivery anyway
if (addDemand > 0) {
localCumulativeDemand += addDemand
val demand = StreamRefs.CumulativeDemand(localCumulativeDemand)
log.warning("[{}] Demanding until [{}] (+{})", stageActorName, localCumulativeDemand, addDemand)
remotePartner ! demand
scheduleDemandRedelivery()
}
}
val DemandRedeliveryTimerKey = "DemandRedeliveryTimerKey"
def scheduleDemandRedelivery() = scheduleOnce(DemandRedeliveryTimerKey, settings.demandRedeliveryInterval)
override protected def onTimer(timerKey: Any): Unit = timerKey match {
case DemandRedeliveryTimerKey
log.debug("[{}] Scheduled re-delivery of demand until [{}]", stageActorName, localCumulativeDemand)
remotePartner ! StreamRefs.CumulativeDemand(localCumulativeDemand)
scheduleDemandRedelivery()
}
lazy val initialReceive: ((ActorRef, Any)) Unit = {
case (sender, msg @ StreamRefs.SequencedOnNext(seqNr, payload))
observeAndValidateSender(sender, "Illegal sender in SequencedOnNext")
observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in SequencedOnNext")
log.warning("Received seq {} from {}", msg, sender)
triggerCumulativeDemand()
tryPush(payload)
case (sender, StreamRefs.RemoteSinkCompleted(seqNr))
observeAndValidateSender(sender, "Illegal sender in RemoteSinkCompleted")
observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in RemoteSinkCompleted")
log.debug("The remote Sink has completed, completing this source as well...")
self.unwatch(sender)
completeStage()
case (sender, StreamRefs.RemoteSinkFailure(reason))
observeAndValidateSender(sender, "Illegal sender in RemoteSinkFailure")
log.debug("The remote Sink has failed, failing (reason: {})", reason)
self.unwatch(sender)
failStage(StreamRefs.RemoteStreamRefActorTerminatedException(s"Remote Sink failed, reason: $reason"))
}
def tryPush(): Unit =
if (isAvailable(out) && receiveBuffer.nonEmpty) {
val elem = receiveBuffer.dequeue()
log.warning(s"PUSHING SIGNALED ${elem} (capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
push(out, elem)
}
def tryPush(payload: Any): Unit =
if (isAvailable(out)) {
if (receiveBuffer.nonEmpty) {
val elem = receiveBuffer.dequeue()
push(out, elem)
receiveBuffer.enqueue(payload.asInstanceOf[T])
log.warning(s"PUSHING SIGNALED ${elem} BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
} else {
push(out, payload.asInstanceOf[T])
log.warning(s"PUSHING DIRECTLY ${payload}")
}
} else {
receiveBuffer.enqueue(payload.asInstanceOf[T])
log.warning("PUSHING BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
}
@throws[StreamRefs.InvalidPartnerActorException]
def observeAndValidateSender(sender: ActorRef, msg: String): Unit =
if (remotePartner == null) {
log.debug("Received first message from {}, assuming it to be the remote partner for this stage", sender)
remotePartner = sender
self.watch(sender)
} else if (sender != remotePartner) {
throw StreamRefs.InvalidPartnerActorException(sender, remotePartner, msg)
}
/**
 * Validates that `seqNr` is exactly the next expected sequence number and
 * advances the expectation counter; otherwise fails with a descriptive error.
 *
 * @throws StreamRefs.InvalidSequenceNumberException when `seqNr` is out of order
 */
@throws[StreamRefs.InvalidSequenceNumberException]
def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
  if (!isInvalidSequenceNr(seqNr)) expectingSeqNr += 1
  else throw StreamRefs.InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)
/** True when `seqNr` is not the exact next expected number — the protocol requires strict, gapless ordering. */
def isInvalidSequenceNr(seqNr: Long): Boolean =
  seqNr != expectingSeqNr
setHandler(out, this)
}
(logic, promise.future) // FIXME we'd want to expose just the ref!
}
// fixed: stray '}' inside the interpolated string rendered as e.g. "SourceRefStageImpl(ref)}"
override def toString: String =
  s"${Logging.simpleName(getClass)}($originRef)"
}
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------

View file

@ -1,19 +0,0 @@
/**
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote.scaladsl
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
import com.typesafe.config.Config
/**
 * Settings for stream refs, read from the `akka.stream.stream-refs`
 * section of the given configuration.
 */
final class StreamRefSettings(config: Config) {
  private val streamRefsConfig = config.getConfig("akka.stream.stream-refs")

  /** Value of the `initial-demand` setting. */
  val initialDemand: Int = streamRefsConfig.getInt("initial-demand")

  /** Value of the `demand-redelivery-interval` setting, as a finite duration. */
  val demandRedeliveryInterval: FiniteDuration =
    streamRefsConfig.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis

  /** Value of the `idle-timeout` setting, as a finite duration. */
  val idleTimeout: FiniteDuration =
    streamRefsConfig.getDuration("idle-timeout", TimeUnit.MILLISECONDS).millis
}

View file

@ -1,160 +0,0 @@
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.remote.serialization
import akka.actor.ExtendedActorSystem
import akka.protobuf.ByteString
import akka.serialization.{ BaseSerializer, Serialization, SerializationExtension, SerializerWithStringManifest }
import akka.stream.remote.scaladsl.{ SinkRef, SourceRef }
import akka.stream.remote.{ StreamRefContainers, StreamRefs }
/**
 * Serializer for the stream-refs wire protocol ([[StreamRefs]] messages) and
 * for the [[SinkRef]] / [[SourceRef]] handles themselves.
 *
 * Element payloads carried inside [[StreamRefs.SequencedOnNext]] are delegated
 * to the serializer registered for the payload's own type.
 *
 * Fix: the `manifest` / `toBinary` / `fromBinary` matches were non-exhaustive
 * and would fail with an opaque [[MatchError]]; they now fail fast with a
 * descriptive [[IllegalArgumentException]], as the
 * [[SerializerWithStringManifest]] contract recommends.
 */
final class StreamRefSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest
  with BaseSerializer {

  // lazy: the SerializationExtension must not be looked up while serializers are still being constructed
  private[this] lazy val serialization = SerializationExtension(system)

  // single-letter manifests keep the per-message wire overhead minimal
  private[this] val SequencedOnNextManifest = "A"
  private[this] val CumulativeDemandManifest = "B"
  private[this] val RemoteSinkFailureManifest = "C"
  private[this] val RemoteSinkCompletedManifest = "D"
  private[this] val SourceRefManifest = "E"
  private[this] val SinkRefManifest = "F"

  override def manifest(o: AnyRef): String = o match {
    // protocol
    case _: StreamRefs.SequencedOnNext[_]  ⇒ SequencedOnNextManifest
    case _: StreamRefs.CumulativeDemand    ⇒ CumulativeDemandManifest
    case _: StreamRefs.RemoteSinkFailure   ⇒ RemoteSinkFailureManifest
    case _: StreamRefs.RemoteSinkCompleted ⇒ RemoteSinkCompletedManifest
    // refs
    case _: SourceRef[_]                   ⇒ SourceRefManifest
    case _: SinkRef[_]                     ⇒ SinkRefManifest
    case other ⇒
      throw new IllegalArgumentException(s"Unsupported object of type [${other.getClass.getName}] in [${getClass.getName}]")
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    // protocol
    case o: StreamRefs.SequencedOnNext[_]  ⇒ serializeSequencedOnNext(o).toByteArray
    case d: StreamRefs.CumulativeDemand    ⇒ serializeCumulativeDemand(d).toByteArray
    case d: StreamRefs.RemoteSinkFailure   ⇒ serializeRemoteSinkFailure(d).toByteArray
    case d: StreamRefs.RemoteSinkCompleted ⇒ serializeRemoteSinkCompleted(d).toByteArray
    // refs
    case ref: SinkRef[_]                   ⇒ serializeSinkRef(ref).toByteArray
    case ref: SourceRef[_]                 ⇒ serializeSourceRef(ref).toByteArray
    case other ⇒
      throw new IllegalArgumentException(s"Unsupported object of type [${other.getClass.getName}] in [${getClass.getName}]")
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    // protocol
    case SequencedOnNextManifest     ⇒ deserializeSequencedOnNext(bytes)
    case CumulativeDemandManifest    ⇒ deserializeCumulativeDemand(bytes)
    case RemoteSinkCompletedManifest ⇒ deserializeRemoteSinkCompleted(bytes)
    case RemoteSinkFailureManifest   ⇒ deserializeRemoteSinkFailure(bytes)
    // refs
    case SinkRefManifest             ⇒ deserializeSinkRef(bytes)
    case SourceRefManifest           ⇒ deserializeSourceRef(bytes)
    case unknown ⇒
      throw new IllegalArgumentException(s"Unknown manifest [$unknown] in [${getClass.getName}]")
  }

  // ----- serialization -----

  private def serializeCumulativeDemand(d: StreamRefs.CumulativeDemand): StreamRefContainers.CumulativeDemand = {
    StreamRefContainers.CumulativeDemand.newBuilder()
      .setSeqNr(d.seqNr)
      .build()
  }

  private def serializeRemoteSinkFailure(d: StreamRefs.RemoteSinkFailure): StreamRefContainers.RemoteSinkFailure = {
    StreamRefContainers.RemoteSinkFailure.newBuilder()
      .setCause(ByteString.copyFrom(d.msg.getBytes))
      .build()
  }

  private def serializeRemoteSinkCompleted(d: StreamRefs.RemoteSinkCompleted): StreamRefContainers.RemoteSinkCompleted = {
    StreamRefContainers.RemoteSinkCompleted.newBuilder()
      .setSeqNr(d.seqNr)
      .build()
  }

  private def serializeSequencedOnNext(o: StreamRefs.SequencedOnNext[_]) = {
    val p = o.payload.asInstanceOf[AnyRef]
    // the payload is serialized with whatever serializer is bound to its own type
    val msgSerializer = serialization.findSerializerFor(p)

    val payloadBuilder = StreamRefContainers.Payload.newBuilder()
      .setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(p)))
      .setSerializerId(msgSerializer.identifier)

    msgSerializer match {
      case ser2: SerializerWithStringManifest ⇒
        val manifest = ser2.manifest(p)
        if (manifest != "")
          payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(manifest))
      case _ ⇒
        // plain serializers may still request a class-name manifest
        if (msgSerializer.includeManifest)
          payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(p.getClass.getName))
    }

    StreamRefContainers.SequencedOnNext.newBuilder()
      .setSeqNr(o.seqNr)
      .setPayload(payloadBuilder.build())
      .build()
  }

  private def serializeSinkRef(sink: SinkRef[_]): StreamRefContainers.SinkRef = {
    val actorRef = StreamRefContainers.ActorRef.newBuilder()
      .setPath(Serialization.serializedActorPath(sink.targetRef))
    StreamRefContainers.SinkRef.newBuilder()
      .setInitialDemand(sink.initialDemand)
      .setTargetRef(actorRef)
      .build()
  }

  private def serializeSourceRef(source: SourceRef[_]): StreamRefContainers.SourceRef = {
    val actorRef = StreamRefContainers.ActorRef.newBuilder()
      .setPath(Serialization.serializedActorPath(source.originRef))
    StreamRefContainers.SourceRef.newBuilder()
      .setOriginRef(actorRef)
      .build()
  }

  // ----- deserialization -----

  private def deserializeSinkRef(bytes: Array[Byte]): SinkRef[Any] = {
    val ref = StreamRefContainers.SinkRef.parseFrom(bytes)
    val targetRef = serialization.system.provider.resolveActorRef(ref.getTargetRef.getPath)
    new SinkRef[Any](targetRef, ref.getInitialDemand)
  }

  private def deserializeSourceRef(bytes: Array[Byte]): SourceRef[Any] = {
    val ref = StreamRefContainers.SourceRef.parseFrom(bytes)
    val targetRef = serialization.system.provider.resolveActorRef(ref.getOriginRef.getPath)
    new SourceRef[Any](targetRef)
  }

  private def deserializeSequencedOnNext(bytes: Array[Byte]): AnyRef = {
    val o = StreamRefContainers.SequencedOnNext.parseFrom(bytes)
    val p = o.getPayload
    val payload = serialization.deserialize(
      p.getEnclosedMessage.toByteArray,
      p.getSerializerId,
      p.getMessageManifest.toStringUtf8
    )
    // payload.get throws if the enclosed payload could not be deserialized, which fails this fromBinary call
    StreamRefs.SequencedOnNext(o.getSeqNr, payload.get)
  }

  private def deserializeCumulativeDemand(bytes: Array[Byte]): StreamRefs.CumulativeDemand = {
    val d = StreamRefContainers.CumulativeDemand.parseFrom(bytes)
    StreamRefs.CumulativeDemand(d.getSeqNr)
  }

  private def deserializeRemoteSinkCompleted(bytes: Array[Byte]): StreamRefs.RemoteSinkCompleted = {
    val d = StreamRefContainers.RemoteSinkCompleted.parseFrom(bytes)
    StreamRefs.RemoteSinkCompleted(d.getSeqNr)
  }

  private def deserializeRemoteSinkFailure(bytes: Array[Byte]): AnyRef = {
    val d = StreamRefContainers.RemoteSinkFailure.parseFrom(bytes)
    StreamRefs.RemoteSinkFailure(d.getCause.toStringUtf8)
  }
}

View file

@ -10,8 +10,10 @@ import akka.stream.actor.ActorSubscriber
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.impl._
import akka.stream.impl.fusing.GraphStages
import akka.stream.impl.streamref.{ SinkRefStageImpl, SourceRefStageImpl }
import akka.stream.stage._
import akka.stream.{ javadsl, _ }
import akka.util.OptionVal
import org.reactivestreams.{ Publisher, Subscriber }
import scala.annotation.tailrec
@ -451,4 +453,14 @@ object Sink {
def lazyInit[T, M](sinkFactory: T Future[Sink[T, M]], fallback: () M): Sink[T, Future[M]] =
Sink.fromGraph(new LazySink(sinkFactory, fallback))
/**
 * A local [[Sink]] which materializes a [[SourceRef]] which can be used by other streams (including remote ones),
 * to consume data from this local stream, as if they were attached directly in place of the local Sink.
 *
 * Adheres to [[StreamRefAttributes]].
 *
 * See more detailed documentation on [[SourceRef]].
 */
def sourceRef[T](): Sink[T, SourceRef[T]] =
  Sink.fromGraph(new SinkRefStageImpl[T](OptionVal.None))
}

View file

@ -4,7 +4,8 @@
package akka.stream.scaladsl
import java.util.concurrent.CompletionStage
import akka.util.ConstantFun
import akka.{ Done, NotUsed }
import akka.actor.{ ActorRef, Cancellable, Props }
import akka.stream.actor.ActorPublisher
import akka.stream.impl.Stages.DefaultAttributes
@ -24,6 +25,12 @@ import scala.compat.java8.FutureConverters._
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ Future, Promise }
import akka.stream.impl.streamref.SourceRefStageImpl
import akka.stream.stage.{ GraphStage, GraphStageWithMaterializedValue }
import akka.util.OptionVal
import scala.compat.java8.FutureConverters._
/**
* A `Source` is a set of stream processing steps that has one open output. It can comprise
* any number of internal sources and transformations that are wired together, or it can be
@ -601,4 +608,15 @@ object Source {
*/
def unfoldResourceAsync[T, S](create: () Future[S], read: (S) Future[Option[T]], close: (S) Future[Done]): Source[T, NotUsed] =
Source.fromGraph(new UnfoldResourceSourceAsync(create, read, close))
/**
 * A local [[Source]] which materializes a [[SinkRef]] which can be used by other streams (including remote ones),
 * to publish data into this local stream, as if they were attached directly in place of the local Source.
 *
 * (Previous wording was copy-pasted from `Sink.sourceRef` and described a Sink/SourceRef instead.)
 *
 * Adheres to [[StreamRefAttributes]].
 *
 * See more detailed documentation on [[SinkRef]].
 */
def sinkRef[T](): Source[T, SinkRef[T]] =
  Source.fromGraph(new SourceRefStageImpl[T](OptionVal.None))
}

View file

@ -0,0 +1,187 @@
/**
* Copyright (C) 2018 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.stream.serialization
import akka.protobuf.ByteString
import akka.actor.ExtendedActorSystem
import akka.annotation.InternalApi
import akka.serialization.{ BaseSerializer, Serialization, SerializationExtension, SerializerWithStringManifest }
import akka.stream.StreamRefMessages
import akka.stream.impl.streamref._
import scala.concurrent.duration._
import scala.concurrent.Await
/**
 * INTERNAL API
 *
 * Serializer for the stream-refs wire protocol ([[StreamRefsProtocol]]
 * messages, including the `OnSubscribeHandshake`) and for the serialized
 * forms of sink/source refs.
 *
 * Element payloads carried inside `SequencedOnNext` are delegated to the
 * serializer registered for the payload's own type.
 *
 * Fix: the `manifest` / `toBinary` / `fromBinary` matches were non-exhaustive
 * and would fail with an opaque [[MatchError]]; they now fail fast with a
 * descriptive [[IllegalArgumentException]], as the
 * [[SerializerWithStringManifest]] contract recommends.
 */
@InternalApi
private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest
  with BaseSerializer {

  // lazy: the SerializationExtension must not be looked up while serializers are still being constructed
  private[this] lazy val serialization = SerializationExtension(system)

  // single-letter manifests keep the per-message wire overhead minimal
  private[this] val SequencedOnNextManifest = "A"
  private[this] val CumulativeDemandManifest = "B"
  private[this] val RemoteSinkFailureManifest = "C"
  private[this] val RemoteSinkCompletedManifest = "D"
  private[this] val SourceRefManifest = "E"
  private[this] val SinkRefManifest = "F"
  private[this] val OnSubscribeHandshakeManifest = "G"

  override def manifest(o: AnyRef): String = o match {
    // protocol
    case _: StreamRefsProtocol.SequencedOnNext[_]    ⇒ SequencedOnNextManifest
    case _: StreamRefsProtocol.CumulativeDemand      ⇒ CumulativeDemandManifest
    // handshake
    case _: StreamRefsProtocol.OnSubscribeHandshake  ⇒ OnSubscribeHandshakeManifest
    // completion
    case _: StreamRefsProtocol.RemoteStreamFailure   ⇒ RemoteSinkFailureManifest
    case _: StreamRefsProtocol.RemoteStreamCompleted ⇒ RemoteSinkCompletedManifest
    // refs
    case _: SourceRefImpl[_]                         ⇒ SourceRefManifest
    case _: MaterializedSourceRef[_]                 ⇒ SourceRefManifest
    case _: SinkRefImpl[_]                           ⇒ SinkRefManifest
    case _: MaterializedSinkRef[_]                   ⇒ SinkRefManifest
    case other ⇒
      throw new IllegalArgumentException(s"Unsupported object of type [${other.getClass.getName}] in [${getClass.getName}]")
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    // protocol
    case o: StreamRefsProtocol.SequencedOnNext[_]    ⇒ serializeSequencedOnNext(o).toByteArray
    case d: StreamRefsProtocol.CumulativeDemand      ⇒ serializeCumulativeDemand(d).toByteArray
    // handshake
    case h: StreamRefsProtocol.OnSubscribeHandshake  ⇒ serializeOnSubscribeHandshake(h).toByteArray
    // termination
    case d: StreamRefsProtocol.RemoteStreamFailure   ⇒ serializeRemoteSinkFailure(d).toByteArray
    case d: StreamRefsProtocol.RemoteStreamCompleted ⇒ serializeRemoteSinkCompleted(d).toByteArray
    // refs
    case ref: SinkRefImpl[_]                         ⇒ serializeSinkRef(ref).toByteArray
    // FIXME: blocking on the materialization future with a short hard-coded timeout is fragile —
    // serializing a ref before its materialization completes will throw a TimeoutException here
    case ref: MaterializedSinkRef[_]                 ⇒ serializeSinkRef(Await.result(ref.futureSink, 100.millis)).toByteArray
    case ref: SourceRefImpl[_]                       ⇒ serializeSourceRef(ref).toByteArray
    // FIXME: same blocking concern as for MaterializedSinkRef above
    case ref: MaterializedSourceRef[_]               ⇒ serializeSourceRef(Await.result(ref.futureSource, 100.millis)).toByteArray
    case other ⇒
      throw new IllegalArgumentException(s"Unsupported object of type [${other.getClass.getName}] in [${getClass.getName}]")
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    // protocol
    case OnSubscribeHandshakeManifest ⇒ deserializeOnSubscribeHandshake(bytes)
    case SequencedOnNextManifest      ⇒ deserializeSequencedOnNext(bytes)
    case CumulativeDemandManifest     ⇒ deserializeCumulativeDemand(bytes)
    case RemoteSinkCompletedManifest  ⇒ deserializeRemoteStreamCompleted(bytes)
    case RemoteSinkFailureManifest    ⇒ deserializeRemoteStreamFailure(bytes)
    // refs
    case SinkRefManifest              ⇒ deserializeSinkRef(bytes)
    case SourceRefManifest            ⇒ deserializeSourceRef(bytes)
    case unknown ⇒
      throw new IllegalArgumentException(s"Unknown manifest [$unknown] in [${getClass.getName}]")
  }

  // ----- serialization -----

  private def serializeCumulativeDemand(d: StreamRefsProtocol.CumulativeDemand): StreamRefMessages.CumulativeDemand = {
    StreamRefMessages.CumulativeDemand.newBuilder()
      .setSeqNr(d.seqNr)
      .build()
  }

  private def serializeRemoteSinkFailure(d: StreamRefsProtocol.RemoteStreamFailure): StreamRefMessages.RemoteStreamFailure = {
    StreamRefMessages.RemoteStreamFailure.newBuilder()
      .setCause(ByteString.copyFrom(d.msg.getBytes))
      .build()
  }

  private def serializeRemoteSinkCompleted(d: StreamRefsProtocol.RemoteStreamCompleted): StreamRefMessages.RemoteStreamCompleted = {
    StreamRefMessages.RemoteStreamCompleted.newBuilder()
      .setSeqNr(d.seqNr)
      .build()
  }

  private def serializeOnSubscribeHandshake(o: StreamRefsProtocol.OnSubscribeHandshake): StreamRefMessages.OnSubscribeHandshake = {
    StreamRefMessages.OnSubscribeHandshake.newBuilder()
      .setTargetRef(StreamRefMessages.ActorRef.newBuilder()
        .setPath(Serialization.serializedActorPath(o.targetRef)))
      .build()
  }

  private def serializeSequencedOnNext(o: StreamRefsProtocol.SequencedOnNext[_]) = {
    val p = o.payload.asInstanceOf[AnyRef]
    // the payload is serialized with whatever serializer is bound to its own type
    val msgSerializer = serialization.findSerializerFor(p)

    val payloadBuilder = StreamRefMessages.Payload.newBuilder()
      .setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(p)))
      .setSerializerId(msgSerializer.identifier)

    msgSerializer match {
      case ser2: SerializerWithStringManifest ⇒
        val manifest = ser2.manifest(p)
        if (manifest != "")
          payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(manifest))
      case _ ⇒
        // plain serializers may still request a class-name manifest
        if (msgSerializer.includeManifest)
          payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(p.getClass.getName))
    }

    StreamRefMessages.SequencedOnNext.newBuilder()
      .setSeqNr(o.seqNr)
      .setPayload(payloadBuilder.build())
      .build()
  }

  private def serializeSinkRef(sink: SinkRefImpl[_]): StreamRefMessages.SinkRef = {
    StreamRefMessages.SinkRef.newBuilder()
      .setTargetRef(StreamRefMessages.ActorRef.newBuilder()
        .setPath(Serialization.serializedActorPath(sink.initialPartnerRef)))
      .build()
  }

  private def serializeSourceRef(source: SourceRefImpl[_]): StreamRefMessages.SourceRef = {
    StreamRefMessages.SourceRef.newBuilder()
      .setOriginRef(
        StreamRefMessages.ActorRef.newBuilder()
          .setPath(Serialization.serializedActorPath(source.initialPartnerRef)))
      .build()
  }

  // ----- deserialization -----

  private def deserializeOnSubscribeHandshake(bytes: Array[Byte]): StreamRefsProtocol.OnSubscribeHandshake = {
    val handshake = StreamRefMessages.OnSubscribeHandshake.parseFrom(bytes)
    val targetRef = serialization.system.provider.resolveActorRef(handshake.getTargetRef.getPath)
    StreamRefsProtocol.OnSubscribeHandshake(targetRef)
  }

  private def deserializeSinkRef(bytes: Array[Byte]): SinkRefImpl[Any] = {
    val ref = StreamRefMessages.SinkRef.parseFrom(bytes)
    val initialTargetRef = serialization.system.provider.resolveActorRef(ref.getTargetRef.getPath)
    SinkRefImpl[Any](initialTargetRef)
  }

  private def deserializeSourceRef(bytes: Array[Byte]): SourceRefImpl[Any] = {
    val ref = StreamRefMessages.SourceRef.parseFrom(bytes)
    val initialPartnerRef = serialization.system.provider.resolveActorRef(ref.getOriginRef.getPath)
    SourceRefImpl[Any](initialPartnerRef)
  }

  private def deserializeSequencedOnNext(bytes: Array[Byte]): StreamRefsProtocol.SequencedOnNext[AnyRef] = {
    val o = StreamRefMessages.SequencedOnNext.parseFrom(bytes)
    val p = o.getPayload
    val payload = serialization.deserialize(
      p.getEnclosedMessage.toByteArray,
      p.getSerializerId,
      p.getMessageManifest.toStringUtf8
    )
    // payload.get throws if the enclosed payload could not be deserialized, which fails this fromBinary call
    StreamRefsProtocol.SequencedOnNext(o.getSeqNr, payload.get)
  }

  private def deserializeCumulativeDemand(bytes: Array[Byte]): StreamRefsProtocol.CumulativeDemand = {
    val d = StreamRefMessages.CumulativeDemand.parseFrom(bytes)
    StreamRefsProtocol.CumulativeDemand(d.getSeqNr)
  }

  private def deserializeRemoteStreamCompleted(bytes: Array[Byte]): StreamRefsProtocol.RemoteStreamCompleted = {
    val d = StreamRefMessages.RemoteStreamCompleted.parseFrom(bytes)
    StreamRefsProtocol.RemoteStreamCompleted(d.getSeqNr)
  }

  private def deserializeRemoteStreamFailure(bytes: Array[Byte]): AnyRef = {
    val d = StreamRefMessages.RemoteStreamFailure.parseFrom(bytes)
    StreamRefsProtocol.RemoteStreamFailure(d.getCause.toStringUtf8)
  }
}

View file

@ -1185,7 +1185,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
 * Returns an empty string by default, which means that the name will be a unique generated String (e.g. "$$a").
*/
@ApiMayChange
final protected def stageActorName: String = ""
protected def stageActorName: String = ""
// Internal hooks to avoid reliance on user calling super in preStart
/** INTERNAL API */