+str #24229 first working SinkRef, though serialization is wrong somehow (when serialize-messages = on)
silly serialization mistake, should have fixed serialize as well tage actors now can have names, which helps a lot in debugging thread weirdness make sure to fail properly, actually go over remoting issue with not receiving the SinkRef... what initial working SinkRef over remoting remote Sink failure must fail origin Source as well cleaning up and adding failyre handling SinkRef now with low-watermark RequestStrategy source ref works, yet completely duplicated code
This commit is contained in:
parent
b48d907d66
commit
682b047ac3
17 changed files with 8145 additions and 15 deletions
|
|
@ -9,10 +9,12 @@ import scala.util.control.NonFatal
|
||||||
import scala.collection.immutable
|
import scala.collection.immutable
|
||||||
import akka.actor._
|
import akka.actor._
|
||||||
import akka.serialization.SerializationExtension
|
import akka.serialization.SerializationExtension
|
||||||
import akka.util.{ Unsafe, Helpers }
|
import akka.util.{ Helpers, Unsafe }
|
||||||
import akka.serialization.SerializerWithStringManifest
|
import akka.serialization.SerializerWithStringManifest
|
||||||
import java.util.Optional
|
import java.util.Optional
|
||||||
|
|
||||||
|
import akka.event.Logging
|
||||||
|
|
||||||
private[akka] object Children {
|
private[akka] object Children {
|
||||||
val GetNobody = () ⇒ Nobody
|
val GetNobody = () ⇒ Nobody
|
||||||
}
|
}
|
||||||
|
|
@ -192,7 +194,8 @@ private[akka] trait Children { this: ActorCell ⇒
|
||||||
|
|
||||||
protected def getAllChildStats: immutable.Iterable[ChildRestartStats] = childrenRefs.stats
|
protected def getAllChildStats: immutable.Iterable[ChildRestartStats] = childrenRefs.stats
|
||||||
|
|
||||||
override def getSingleChild(name: String): InternalActorRef =
|
override def getSingleChild(name: String): InternalActorRef = {
|
||||||
|
|
||||||
if (name.indexOf('#') == -1) {
|
if (name.indexOf('#') == -1) {
|
||||||
// optimization for the non-uid case
|
// optimization for the non-uid case
|
||||||
getChildByName(name) match {
|
getChildByName(name) match {
|
||||||
|
|
@ -207,6 +210,7 @@ private[akka] trait Children { this: ActorCell ⇒
|
||||||
case _ ⇒ getFunctionRefOrNobody(childName, uid)
|
case _ ⇒ getFunctionRefOrNobody(childName, uid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
protected def removeChildAndGetStateChange(child: ActorRef): Option[SuspendReason] = {
|
protected def removeChildAndGetStateChange(child: ActorRef): Option[SuspendReason] = {
|
||||||
@tailrec def removeChild(ref: ActorRef): ChildrenContainer = {
|
@tailrec def removeChild(ref: ActorRef): ChildrenContainer = {
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,823 @@
|
||||||
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||||
|
// source: FlightAppModels.proto
|
||||||
|
|
||||||
|
package docs.persistence.proto;
|
||||||
|
|
||||||
|
public final class FlightAppModels {
|
||||||
|
private FlightAppModels() {}
|
||||||
|
public static void registerAllExtensions(
|
||||||
|
akka.protobuf.ExtensionRegistry registry) {
|
||||||
|
}
|
||||||
|
public interface SeatReservedOrBuilder
|
||||||
|
extends akka.protobuf.MessageOrBuilder {
|
||||||
|
|
||||||
|
// required string letter = 1;
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
boolean hasLetter();
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
java.lang.String getLetter();
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
akka.protobuf.ByteString
|
||||||
|
getLetterBytes();
|
||||||
|
|
||||||
|
// required uint32 row = 2;
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
boolean hasRow();
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
int getRow();
|
||||||
|
|
||||||
|
// optional string seatType = 3;
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
boolean hasSeatType();
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
java.lang.String getSeatType();
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
akka.protobuf.ByteString
|
||||||
|
getSeatTypeBytes();
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* Protobuf type {@code docs.persistence.SeatReserved}
|
||||||
|
*/
|
||||||
|
public static final class SeatReserved extends
|
||||||
|
akka.protobuf.GeneratedMessage
|
||||||
|
implements SeatReservedOrBuilder {
|
||||||
|
// Use SeatReserved.newBuilder() to construct.
|
||||||
|
private SeatReserved(akka.protobuf.GeneratedMessage.Builder<?> builder) {
|
||||||
|
super(builder);
|
||||||
|
this.unknownFields = builder.getUnknownFields();
|
||||||
|
}
|
||||||
|
private SeatReserved(boolean noInit) { this.unknownFields = akka.protobuf.UnknownFieldSet.getDefaultInstance(); }
|
||||||
|
|
||||||
|
private static final SeatReserved defaultInstance;
|
||||||
|
public static SeatReserved getDefaultInstance() {
|
||||||
|
return defaultInstance;
|
||||||
|
}
|
||||||
|
|
||||||
|
public SeatReserved getDefaultInstanceForType() {
|
||||||
|
return defaultInstance;
|
||||||
|
}
|
||||||
|
|
||||||
|
private final akka.protobuf.UnknownFieldSet unknownFields;
|
||||||
|
@java.lang.Override
|
||||||
|
public final akka.protobuf.UnknownFieldSet
|
||||||
|
getUnknownFields() {
|
||||||
|
return this.unknownFields;
|
||||||
|
}
|
||||||
|
private SeatReserved(
|
||||||
|
akka.protobuf.CodedInputStream input,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws akka.protobuf.InvalidProtocolBufferException {
|
||||||
|
initFields();
|
||||||
|
int mutable_bitField0_ = 0;
|
||||||
|
akka.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||||
|
akka.protobuf.UnknownFieldSet.newBuilder();
|
||||||
|
try {
|
||||||
|
boolean done = false;
|
||||||
|
while (!done) {
|
||||||
|
int tag = input.readTag();
|
||||||
|
switch (tag) {
|
||||||
|
case 0:
|
||||||
|
done = true;
|
||||||
|
break;
|
||||||
|
default: {
|
||||||
|
if (!parseUnknownField(input, unknownFields,
|
||||||
|
extensionRegistry, tag)) {
|
||||||
|
done = true;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 10: {
|
||||||
|
bitField0_ |= 0x00000001;
|
||||||
|
letter_ = input.readBytes();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 16: {
|
||||||
|
bitField0_ |= 0x00000002;
|
||||||
|
row_ = input.readUInt32();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case 26: {
|
||||||
|
bitField0_ |= 0x00000004;
|
||||||
|
seatType_ = input.readBytes();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (akka.protobuf.InvalidProtocolBufferException e) {
|
||||||
|
throw e.setUnfinishedMessage(this);
|
||||||
|
} catch (java.io.IOException e) {
|
||||||
|
throw new akka.protobuf.InvalidProtocolBufferException(
|
||||||
|
e.getMessage()).setUnfinishedMessage(this);
|
||||||
|
} finally {
|
||||||
|
this.unknownFields = unknownFields.build();
|
||||||
|
makeExtensionsImmutable();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
public static final akka.protobuf.Descriptors.Descriptor
|
||||||
|
getDescriptor() {
|
||||||
|
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_descriptor;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected akka.protobuf.GeneratedMessage.FieldAccessorTable
|
||||||
|
internalGetFieldAccessorTable() {
|
||||||
|
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_fieldAccessorTable
|
||||||
|
.ensureFieldAccessorsInitialized(
|
||||||
|
docs.persistence.proto.FlightAppModels.SeatReserved.class, docs.persistence.proto.FlightAppModels.SeatReserved.Builder.class);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static akka.protobuf.Parser<SeatReserved> PARSER =
|
||||||
|
new akka.protobuf.AbstractParser<SeatReserved>() {
|
||||||
|
public SeatReserved parsePartialFrom(
|
||||||
|
akka.protobuf.CodedInputStream input,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws akka.protobuf.InvalidProtocolBufferException {
|
||||||
|
return new SeatReserved(input, extensionRegistry);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
@java.lang.Override
|
||||||
|
public akka.protobuf.Parser<SeatReserved> getParserForType() {
|
||||||
|
return PARSER;
|
||||||
|
}
|
||||||
|
|
||||||
|
private int bitField0_;
|
||||||
|
// required string letter = 1;
|
||||||
|
public static final int LETTER_FIELD_NUMBER = 1;
|
||||||
|
private java.lang.Object letter_;
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public boolean hasLetter() {
|
||||||
|
return ((bitField0_ & 0x00000001) == 0x00000001);
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public java.lang.String getLetter() {
|
||||||
|
java.lang.Object ref = letter_;
|
||||||
|
if (ref instanceof java.lang.String) {
|
||||||
|
return (java.lang.String) ref;
|
||||||
|
} else {
|
||||||
|
akka.protobuf.ByteString bs =
|
||||||
|
(akka.protobuf.ByteString) ref;
|
||||||
|
java.lang.String s = bs.toStringUtf8();
|
||||||
|
if (bs.isValidUtf8()) {
|
||||||
|
letter_ = s;
|
||||||
|
}
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public akka.protobuf.ByteString
|
||||||
|
getLetterBytes() {
|
||||||
|
java.lang.Object ref = letter_;
|
||||||
|
if (ref instanceof java.lang.String) {
|
||||||
|
akka.protobuf.ByteString b =
|
||||||
|
akka.protobuf.ByteString.copyFromUtf8(
|
||||||
|
(java.lang.String) ref);
|
||||||
|
letter_ = b;
|
||||||
|
return b;
|
||||||
|
} else {
|
||||||
|
return (akka.protobuf.ByteString) ref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// required uint32 row = 2;
|
||||||
|
public static final int ROW_FIELD_NUMBER = 2;
|
||||||
|
private int row_;
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
public boolean hasRow() {
|
||||||
|
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
public int getRow() {
|
||||||
|
return row_;
|
||||||
|
}
|
||||||
|
|
||||||
|
// optional string seatType = 3;
|
||||||
|
public static final int SEATTYPE_FIELD_NUMBER = 3;
|
||||||
|
private java.lang.Object seatType_;
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public boolean hasSeatType() {
|
||||||
|
return ((bitField0_ & 0x00000004) == 0x00000004);
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public java.lang.String getSeatType() {
|
||||||
|
java.lang.Object ref = seatType_;
|
||||||
|
if (ref instanceof java.lang.String) {
|
||||||
|
return (java.lang.String) ref;
|
||||||
|
} else {
|
||||||
|
akka.protobuf.ByteString bs =
|
||||||
|
(akka.protobuf.ByteString) ref;
|
||||||
|
java.lang.String s = bs.toStringUtf8();
|
||||||
|
if (bs.isValidUtf8()) {
|
||||||
|
seatType_ = s;
|
||||||
|
}
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public akka.protobuf.ByteString
|
||||||
|
getSeatTypeBytes() {
|
||||||
|
java.lang.Object ref = seatType_;
|
||||||
|
if (ref instanceof java.lang.String) {
|
||||||
|
akka.protobuf.ByteString b =
|
||||||
|
akka.protobuf.ByteString.copyFromUtf8(
|
||||||
|
(java.lang.String) ref);
|
||||||
|
seatType_ = b;
|
||||||
|
return b;
|
||||||
|
} else {
|
||||||
|
return (akka.protobuf.ByteString) ref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void initFields() {
|
||||||
|
letter_ = "";
|
||||||
|
row_ = 0;
|
||||||
|
seatType_ = "";
|
||||||
|
}
|
||||||
|
private byte memoizedIsInitialized = -1;
|
||||||
|
public final boolean isInitialized() {
|
||||||
|
byte isInitialized = memoizedIsInitialized;
|
||||||
|
if (isInitialized != -1) return isInitialized == 1;
|
||||||
|
|
||||||
|
if (!hasLetter()) {
|
||||||
|
memoizedIsInitialized = 0;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!hasRow()) {
|
||||||
|
memoizedIsInitialized = 0;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
memoizedIsInitialized = 1;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void writeTo(akka.protobuf.CodedOutputStream output)
|
||||||
|
throws java.io.IOException {
|
||||||
|
getSerializedSize();
|
||||||
|
if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
||||||
|
output.writeBytes(1, getLetterBytes());
|
||||||
|
}
|
||||||
|
if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
||||||
|
output.writeUInt32(2, row_);
|
||||||
|
}
|
||||||
|
if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
||||||
|
output.writeBytes(3, getSeatTypeBytes());
|
||||||
|
}
|
||||||
|
getUnknownFields().writeTo(output);
|
||||||
|
}
|
||||||
|
|
||||||
|
private int memoizedSerializedSize = -1;
|
||||||
|
public int getSerializedSize() {
|
||||||
|
int size = memoizedSerializedSize;
|
||||||
|
if (size != -1) return size;
|
||||||
|
|
||||||
|
size = 0;
|
||||||
|
if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
||||||
|
size += akka.protobuf.CodedOutputStream
|
||||||
|
.computeBytesSize(1, getLetterBytes());
|
||||||
|
}
|
||||||
|
if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
||||||
|
size += akka.protobuf.CodedOutputStream
|
||||||
|
.computeUInt32Size(2, row_);
|
||||||
|
}
|
||||||
|
if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
||||||
|
size += akka.protobuf.CodedOutputStream
|
||||||
|
.computeBytesSize(3, getSeatTypeBytes());
|
||||||
|
}
|
||||||
|
size += getUnknownFields().getSerializedSize();
|
||||||
|
memoizedSerializedSize = size;
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static final long serialVersionUID = 0L;
|
||||||
|
@java.lang.Override
|
||||||
|
protected java.lang.Object writeReplace()
|
||||||
|
throws java.io.ObjectStreamException {
|
||||||
|
return super.writeReplace();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
|
||||||
|
akka.protobuf.ByteString data)
|
||||||
|
throws akka.protobuf.InvalidProtocolBufferException {
|
||||||
|
return PARSER.parseFrom(data);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
|
||||||
|
akka.protobuf.ByteString data,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws akka.protobuf.InvalidProtocolBufferException {
|
||||||
|
return PARSER.parseFrom(data, extensionRegistry);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(byte[] data)
|
||||||
|
throws akka.protobuf.InvalidProtocolBufferException {
|
||||||
|
return PARSER.parseFrom(data);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
|
||||||
|
byte[] data,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws akka.protobuf.InvalidProtocolBufferException {
|
||||||
|
return PARSER.parseFrom(data, extensionRegistry);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(java.io.InputStream input)
|
||||||
|
throws java.io.IOException {
|
||||||
|
return PARSER.parseFrom(input);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
|
||||||
|
java.io.InputStream input,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws java.io.IOException {
|
||||||
|
return PARSER.parseFrom(input, extensionRegistry);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseDelimitedFrom(java.io.InputStream input)
|
||||||
|
throws java.io.IOException {
|
||||||
|
return PARSER.parseDelimitedFrom(input);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseDelimitedFrom(
|
||||||
|
java.io.InputStream input,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws java.io.IOException {
|
||||||
|
return PARSER.parseDelimitedFrom(input, extensionRegistry);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
|
||||||
|
akka.protobuf.CodedInputStream input)
|
||||||
|
throws java.io.IOException {
|
||||||
|
return PARSER.parseFrom(input);
|
||||||
|
}
|
||||||
|
public static docs.persistence.proto.FlightAppModels.SeatReserved parseFrom(
|
||||||
|
akka.protobuf.CodedInputStream input,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws java.io.IOException {
|
||||||
|
return PARSER.parseFrom(input, extensionRegistry);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static Builder newBuilder() { return Builder.create(); }
|
||||||
|
public Builder newBuilderForType() { return newBuilder(); }
|
||||||
|
public static Builder newBuilder(docs.persistence.proto.FlightAppModels.SeatReserved prototype) {
|
||||||
|
return newBuilder().mergeFrom(prototype);
|
||||||
|
}
|
||||||
|
public Builder toBuilder() { return newBuilder(this); }
|
||||||
|
|
||||||
|
@java.lang.Override
|
||||||
|
protected Builder newBuilderForType(
|
||||||
|
akka.protobuf.GeneratedMessage.BuilderParent parent) {
|
||||||
|
Builder builder = new Builder(parent);
|
||||||
|
return builder;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* Protobuf type {@code docs.persistence.SeatReserved}
|
||||||
|
*/
|
||||||
|
public static final class Builder extends
|
||||||
|
akka.protobuf.GeneratedMessage.Builder<Builder>
|
||||||
|
implements docs.persistence.proto.FlightAppModels.SeatReservedOrBuilder {
|
||||||
|
public static final akka.protobuf.Descriptors.Descriptor
|
||||||
|
getDescriptor() {
|
||||||
|
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_descriptor;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected akka.protobuf.GeneratedMessage.FieldAccessorTable
|
||||||
|
internalGetFieldAccessorTable() {
|
||||||
|
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_fieldAccessorTable
|
||||||
|
.ensureFieldAccessorsInitialized(
|
||||||
|
docs.persistence.proto.FlightAppModels.SeatReserved.class, docs.persistence.proto.FlightAppModels.SeatReserved.Builder.class);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct using docs.persistence.proto.FlightAppModels.SeatReserved.newBuilder()
|
||||||
|
private Builder() {
|
||||||
|
maybeForceBuilderInitialization();
|
||||||
|
}
|
||||||
|
|
||||||
|
private Builder(
|
||||||
|
akka.protobuf.GeneratedMessage.BuilderParent parent) {
|
||||||
|
super(parent);
|
||||||
|
maybeForceBuilderInitialization();
|
||||||
|
}
|
||||||
|
private void maybeForceBuilderInitialization() {
|
||||||
|
if (akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
private static Builder create() {
|
||||||
|
return new Builder();
|
||||||
|
}
|
||||||
|
|
||||||
|
public Builder clear() {
|
||||||
|
super.clear();
|
||||||
|
letter_ = "";
|
||||||
|
bitField0_ = (bitField0_ & ~0x00000001);
|
||||||
|
row_ = 0;
|
||||||
|
bitField0_ = (bitField0_ & ~0x00000002);
|
||||||
|
seatType_ = "";
|
||||||
|
bitField0_ = (bitField0_ & ~0x00000004);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Builder clone() {
|
||||||
|
return create().mergeFrom(buildPartial());
|
||||||
|
}
|
||||||
|
|
||||||
|
public akka.protobuf.Descriptors.Descriptor
|
||||||
|
getDescriptorForType() {
|
||||||
|
return docs.persistence.proto.FlightAppModels.internal_static_docs_persistence_SeatReserved_descriptor;
|
||||||
|
}
|
||||||
|
|
||||||
|
public docs.persistence.proto.FlightAppModels.SeatReserved getDefaultInstanceForType() {
|
||||||
|
return docs.persistence.proto.FlightAppModels.SeatReserved.getDefaultInstance();
|
||||||
|
}
|
||||||
|
|
||||||
|
public docs.persistence.proto.FlightAppModels.SeatReserved build() {
|
||||||
|
docs.persistence.proto.FlightAppModels.SeatReserved result = buildPartial();
|
||||||
|
if (!result.isInitialized()) {
|
||||||
|
throw newUninitializedMessageException(result);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
public docs.persistence.proto.FlightAppModels.SeatReserved buildPartial() {
|
||||||
|
docs.persistence.proto.FlightAppModels.SeatReserved result = new docs.persistence.proto.FlightAppModels.SeatReserved(this);
|
||||||
|
int from_bitField0_ = bitField0_;
|
||||||
|
int to_bitField0_ = 0;
|
||||||
|
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
||||||
|
to_bitField0_ |= 0x00000001;
|
||||||
|
}
|
||||||
|
result.letter_ = letter_;
|
||||||
|
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
||||||
|
to_bitField0_ |= 0x00000002;
|
||||||
|
}
|
||||||
|
result.row_ = row_;
|
||||||
|
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
||||||
|
to_bitField0_ |= 0x00000004;
|
||||||
|
}
|
||||||
|
result.seatType_ = seatType_;
|
||||||
|
result.bitField0_ = to_bitField0_;
|
||||||
|
onBuilt();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Builder mergeFrom(akka.protobuf.Message other) {
|
||||||
|
if (other instanceof docs.persistence.proto.FlightAppModels.SeatReserved) {
|
||||||
|
return mergeFrom((docs.persistence.proto.FlightAppModels.SeatReserved)other);
|
||||||
|
} else {
|
||||||
|
super.mergeFrom(other);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public Builder mergeFrom(docs.persistence.proto.FlightAppModels.SeatReserved other) {
|
||||||
|
if (other == docs.persistence.proto.FlightAppModels.SeatReserved.getDefaultInstance()) return this;
|
||||||
|
if (other.hasLetter()) {
|
||||||
|
bitField0_ |= 0x00000001;
|
||||||
|
letter_ = other.letter_;
|
||||||
|
onChanged();
|
||||||
|
}
|
||||||
|
if (other.hasRow()) {
|
||||||
|
setRow(other.getRow());
|
||||||
|
}
|
||||||
|
if (other.hasSeatType()) {
|
||||||
|
bitField0_ |= 0x00000004;
|
||||||
|
seatType_ = other.seatType_;
|
||||||
|
onChanged();
|
||||||
|
}
|
||||||
|
this.mergeUnknownFields(other.getUnknownFields());
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public final boolean isInitialized() {
|
||||||
|
if (!hasLetter()) {
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!hasRow()) {
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Builder mergeFrom(
|
||||||
|
akka.protobuf.CodedInputStream input,
|
||||||
|
akka.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
|
throws java.io.IOException {
|
||||||
|
docs.persistence.proto.FlightAppModels.SeatReserved parsedMessage = null;
|
||||||
|
try {
|
||||||
|
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
|
||||||
|
} catch (akka.protobuf.InvalidProtocolBufferException e) {
|
||||||
|
parsedMessage = (docs.persistence.proto.FlightAppModels.SeatReserved) e.getUnfinishedMessage();
|
||||||
|
throw e;
|
||||||
|
} finally {
|
||||||
|
if (parsedMessage != null) {
|
||||||
|
mergeFrom(parsedMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
private int bitField0_;
|
||||||
|
|
||||||
|
// required string letter = 1;
|
||||||
|
private java.lang.Object letter_ = "";
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public boolean hasLetter() {
|
||||||
|
return ((bitField0_ & 0x00000001) == 0x00000001);
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public java.lang.String getLetter() {
|
||||||
|
java.lang.Object ref = letter_;
|
||||||
|
if (!(ref instanceof java.lang.String)) {
|
||||||
|
java.lang.String s = ((akka.protobuf.ByteString) ref)
|
||||||
|
.toStringUtf8();
|
||||||
|
letter_ = s;
|
||||||
|
return s;
|
||||||
|
} else {
|
||||||
|
return (java.lang.String) ref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public akka.protobuf.ByteString
|
||||||
|
getLetterBytes() {
|
||||||
|
java.lang.Object ref = letter_;
|
||||||
|
if (ref instanceof String) {
|
||||||
|
akka.protobuf.ByteString b =
|
||||||
|
akka.protobuf.ByteString.copyFromUtf8(
|
||||||
|
(java.lang.String) ref);
|
||||||
|
letter_ = b;
|
||||||
|
return b;
|
||||||
|
} else {
|
||||||
|
return (akka.protobuf.ByteString) ref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public Builder setLetter(
|
||||||
|
java.lang.String value) {
|
||||||
|
if (value == null) {
|
||||||
|
throw new NullPointerException();
|
||||||
|
}
|
||||||
|
bitField0_ |= 0x00000001;
|
||||||
|
letter_ = value;
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public Builder clearLetter() {
|
||||||
|
bitField0_ = (bitField0_ & ~0x00000001);
|
||||||
|
letter_ = getDefaultInstance().getLetter();
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required string letter = 1;</code>
|
||||||
|
*/
|
||||||
|
public Builder setLetterBytes(
|
||||||
|
akka.protobuf.ByteString value) {
|
||||||
|
if (value == null) {
|
||||||
|
throw new NullPointerException();
|
||||||
|
}
|
||||||
|
bitField0_ |= 0x00000001;
|
||||||
|
letter_ = value;
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
// required uint32 row = 2;
|
||||||
|
private int row_ ;
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
public boolean hasRow() {
|
||||||
|
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
public int getRow() {
|
||||||
|
return row_;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
public Builder setRow(int value) {
|
||||||
|
bitField0_ |= 0x00000002;
|
||||||
|
row_ = value;
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>required uint32 row = 2;</code>
|
||||||
|
*/
|
||||||
|
public Builder clearRow() {
|
||||||
|
bitField0_ = (bitField0_ & ~0x00000002);
|
||||||
|
row_ = 0;
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
// optional string seatType = 3;
|
||||||
|
private java.lang.Object seatType_ = "";
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public boolean hasSeatType() {
|
||||||
|
return ((bitField0_ & 0x00000004) == 0x00000004);
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public java.lang.String getSeatType() {
|
||||||
|
java.lang.Object ref = seatType_;
|
||||||
|
if (!(ref instanceof java.lang.String)) {
|
||||||
|
java.lang.String s = ((akka.protobuf.ByteString) ref)
|
||||||
|
.toStringUtf8();
|
||||||
|
seatType_ = s;
|
||||||
|
return s;
|
||||||
|
} else {
|
||||||
|
return (java.lang.String) ref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public akka.protobuf.ByteString
|
||||||
|
getSeatTypeBytes() {
|
||||||
|
java.lang.Object ref = seatType_;
|
||||||
|
if (ref instanceof String) {
|
||||||
|
akka.protobuf.ByteString b =
|
||||||
|
akka.protobuf.ByteString.copyFromUtf8(
|
||||||
|
(java.lang.String) ref);
|
||||||
|
seatType_ = b;
|
||||||
|
return b;
|
||||||
|
} else {
|
||||||
|
return (akka.protobuf.ByteString) ref;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public Builder setSeatType(
|
||||||
|
java.lang.String value) {
|
||||||
|
if (value == null) {
|
||||||
|
throw new NullPointerException();
|
||||||
|
}
|
||||||
|
bitField0_ |= 0x00000004;
|
||||||
|
seatType_ = value;
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public Builder clearSeatType() {
|
||||||
|
bitField0_ = (bitField0_ & ~0x00000004);
|
||||||
|
seatType_ = getDefaultInstance().getSeatType();
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* <code>optional string seatType = 3;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* the new field
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
public Builder setSeatTypeBytes(
|
||||||
|
akka.protobuf.ByteString value) {
|
||||||
|
if (value == null) {
|
||||||
|
throw new NullPointerException();
|
||||||
|
}
|
||||||
|
bitField0_ |= 0x00000004;
|
||||||
|
seatType_ = value;
|
||||||
|
onChanged();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
// @@protoc_insertion_point(builder_scope:docs.persistence.SeatReserved)
|
||||||
|
}
|
||||||
|
|
||||||
|
static {
|
||||||
|
defaultInstance = new SeatReserved(true);
|
||||||
|
defaultInstance.initFields();
|
||||||
|
}
|
||||||
|
|
||||||
|
// @@protoc_insertion_point(class_scope:docs.persistence.SeatReserved)
|
||||||
|
}
|
||||||
|
|
||||||
|
private static akka.protobuf.Descriptors.Descriptor
|
||||||
|
internal_static_docs_persistence_SeatReserved_descriptor;
|
||||||
|
private static
|
||||||
|
akka.protobuf.GeneratedMessage.FieldAccessorTable
|
||||||
|
internal_static_docs_persistence_SeatReserved_fieldAccessorTable;
|
||||||
|
|
||||||
|
public static akka.protobuf.Descriptors.FileDescriptor
|
||||||
|
getDescriptor() {
|
||||||
|
return descriptor;
|
||||||
|
}
|
||||||
|
private static akka.protobuf.Descriptors.FileDescriptor
|
||||||
|
descriptor;
|
||||||
|
static {
|
||||||
|
java.lang.String[] descriptorData = {
|
||||||
|
"\n\025FlightAppModels.proto\022\020docs.persistenc" +
|
||||||
|
"e\"=\n\014SeatReserved\022\016\n\006letter\030\001 \002(\t\022\013\n\003row" +
|
||||||
|
"\030\002 \002(\r\022\020\n\010seatType\030\003 \001(\tB\032\n\026docs.persist" +
|
||||||
|
"ence.protoH\001"
|
||||||
|
};
|
||||||
|
akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||||
|
new akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||||
|
public akka.protobuf.ExtensionRegistry assignDescriptors(
|
||||||
|
akka.protobuf.Descriptors.FileDescriptor root) {
|
||||||
|
descriptor = root;
|
||||||
|
internal_static_docs_persistence_SeatReserved_descriptor =
|
||||||
|
getDescriptor().getMessageTypes().get(0);
|
||||||
|
internal_static_docs_persistence_SeatReserved_fieldAccessorTable = new
|
||||||
|
akka.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||||
|
internal_static_docs_persistence_SeatReserved_descriptor,
|
||||||
|
new java.lang.String[] { "Letter", "Row", "SeatType", });
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
akka.protobuf.Descriptors.FileDescriptor
|
||||||
|
.internalBuildGeneratedFileFrom(descriptorData,
|
||||||
|
new akka.protobuf.Descriptors.FileDescriptor[] {
|
||||||
|
}, assigner);
|
||||||
|
}
|
||||||
|
|
||||||
|
// @@protoc_insertion_point(outer_class_scope)
|
||||||
|
}
|
||||||
|
|
@ -4282,14 +4282,26 @@ public final class MessageFormats {
|
||||||
// optional string timeout = 2;
|
// optional string timeout = 2;
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
boolean hasTimeout();
|
boolean hasTimeout();
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
java.lang.String getTimeout();
|
java.lang.String getTimeout();
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
akka.protobuf.ByteString
|
akka.protobuf.ByteString
|
||||||
getTimeoutBytes();
|
getTimeoutBytes();
|
||||||
|
|
@ -4458,12 +4470,20 @@ public final class MessageFormats {
|
||||||
private java.lang.Object timeout_;
|
private java.lang.Object timeout_;
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public boolean hasTimeout() {
|
public boolean hasTimeout() {
|
||||||
return ((bitField0_ & 0x00000002) == 0x00000002);
|
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public java.lang.String getTimeout() {
|
public java.lang.String getTimeout() {
|
||||||
java.lang.Object ref = timeout_;
|
java.lang.Object ref = timeout_;
|
||||||
|
|
@ -4481,6 +4501,10 @@ public final class MessageFormats {
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public akka.protobuf.ByteString
|
public akka.protobuf.ByteString
|
||||||
getTimeoutBytes() {
|
getTimeoutBytes() {
|
||||||
|
|
@ -4863,12 +4887,20 @@ public final class MessageFormats {
|
||||||
private java.lang.Object timeout_ = "";
|
private java.lang.Object timeout_ = "";
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public boolean hasTimeout() {
|
public boolean hasTimeout() {
|
||||||
return ((bitField0_ & 0x00000002) == 0x00000002);
|
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public java.lang.String getTimeout() {
|
public java.lang.String getTimeout() {
|
||||||
java.lang.Object ref = timeout_;
|
java.lang.Object ref = timeout_;
|
||||||
|
|
@ -4883,6 +4915,10 @@ public final class MessageFormats {
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public akka.protobuf.ByteString
|
public akka.protobuf.ByteString
|
||||||
getTimeoutBytes() {
|
getTimeoutBytes() {
|
||||||
|
|
@ -4899,6 +4935,10 @@ public final class MessageFormats {
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public Builder setTimeout(
|
public Builder setTimeout(
|
||||||
java.lang.String value) {
|
java.lang.String value) {
|
||||||
|
|
@ -4912,6 +4952,10 @@ public final class MessageFormats {
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public Builder clearTimeout() {
|
public Builder clearTimeout() {
|
||||||
bitField0_ = (bitField0_ & ~0x00000002);
|
bitField0_ = (bitField0_ & ~0x00000002);
|
||||||
|
|
@ -4921,6 +4965,10 @@ public final class MessageFormats {
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* <code>optional string timeout = 2;</code>
|
* <code>optional string timeout = 2;</code>
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
*not used in new records from 2.4.5
|
||||||
|
* </pre>
|
||||||
*/
|
*/
|
||||||
public Builder setTimeoutBytes(
|
public Builder setTimeoutBytes(
|
||||||
akka.protobuf.ByteString value) {
|
akka.protobuf.ByteString value) {
|
||||||
|
|
|
||||||
|
|
@ -87,8 +87,7 @@ private[remote] final class SendQueue[T] extends GraphStageWithMaterializedValue
|
||||||
needWakeup = true
|
needWakeup = true
|
||||||
// additional poll() to grab any elements that might missed the needWakeup
|
// additional poll() to grab any elements that might missed the needWakeup
|
||||||
// and have been enqueued just after it
|
// and have been enqueued just after it
|
||||||
if (firstAttempt)
|
if (firstAttempt) tryPush(firstAttempt = false)
|
||||||
tryPush(firstAttempt = false)
|
|
||||||
case elem ⇒
|
case elem ⇒
|
||||||
needWakeup = false // there will be another onPull
|
needWakeup = false // there will be another onPull
|
||||||
push(out, elem)
|
push(out, elem)
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,274 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote
|
||||||
|
|
||||||
|
import akka.NotUsed
|
||||||
|
import akka.actor.{ Actor, ActorIdentity, ActorLogging, ActorRef, ActorSystem, ActorSystemImpl, Identify, Props }
|
||||||
|
import akka.stream.ActorMaterializer
|
||||||
|
import akka.stream.remote.scaladsl.{ SinkRef, SourceRef }
|
||||||
|
import akka.stream.scaladsl.{ Sink, Source }
|
||||||
|
import akka.testkit.{ AkkaSpec, ImplicitSender, SocketUtil, TestKit, TestProbe }
|
||||||
|
import akka.util.ByteString
|
||||||
|
import com.typesafe.config._
|
||||||
|
|
||||||
|
import scala.concurrent.duration._
|
||||||
|
import scala.concurrent.{ Await, Future }
|
||||||
|
|
||||||
|
object StreamRefsSpec {
|
||||||
|
|
||||||
|
object DatasourceActor {
|
||||||
|
def props(probe: ActorRef): Props =
|
||||||
|
Props(new DatasourceActor(probe))
|
||||||
|
.withDispatcher("akka.test.stream-dispatcher")
|
||||||
|
}
|
||||||
|
|
||||||
|
class DatasourceActor(probe: ActorRef) extends Actor with ActorLogging {
|
||||||
|
implicit val mat = ActorMaterializer()
|
||||||
|
|
||||||
|
def receive = {
|
||||||
|
case "give" ⇒
|
||||||
|
/*
|
||||||
|
* Here we're able to send a source to a remote recipient
|
||||||
|
*
|
||||||
|
* For them it's a Source; for us it is a Sink we run data "into"
|
||||||
|
*/
|
||||||
|
val source: Source[String, NotUsed] = Source(List("hello", "world"))
|
||||||
|
val ref: Future[SourceRef[String]] = source.runWith(SourceRef.sink())
|
||||||
|
|
||||||
|
println(s"source = ${source}")
|
||||||
|
println(s"ref = ${Await.result(ref, 10.seconds)}")
|
||||||
|
|
||||||
|
sender() ! Await.result(ref, 10.seconds)
|
||||||
|
|
||||||
|
// case "send-bulk" ⇒
|
||||||
|
// /*
|
||||||
|
// * Here we're able to send a source to a remote recipient
|
||||||
|
// * The source is a "bulk transfer one, in which we're ready to send a lot of data"
|
||||||
|
// *
|
||||||
|
// * For them it's a Source; for us it is a Sink we run data "into"
|
||||||
|
// */
|
||||||
|
// val source: Source[ByteString, NotUsed] = Source.single(ByteString("huge-file-"))
|
||||||
|
// val ref: SourceRef[ByteString] = source.runWith(SourceRef.bulkTransfer())
|
||||||
|
// sender() ! BulkSourceMsg(ref)
|
||||||
|
|
||||||
|
case "receive" ⇒
|
||||||
|
/*
|
||||||
|
* We write out code, knowing that the other side will stream the data into it.
|
||||||
|
*
|
||||||
|
* For them it's a Sink; for us it's a Source.
|
||||||
|
*/
|
||||||
|
val sink: Future[SinkRef[String]] =
|
||||||
|
SinkRef.source[String]
|
||||||
|
.to(Sink.actorRef(probe, "<COMPLETE>"))
|
||||||
|
.run()
|
||||||
|
|
||||||
|
// FIXME we want to avoid forcing people to do the Future here
|
||||||
|
sender() ! Await.result(sink, 10.seconds)
|
||||||
|
|
||||||
|
// case "receive-bulk" ⇒
|
||||||
|
// /*
|
||||||
|
// * We write out code, knowing that the other side will stream the data into it.
|
||||||
|
// * This will open a dedicated connection per transfer.
|
||||||
|
// *
|
||||||
|
// * For them it's a Sink; for us it's a Source.
|
||||||
|
// */
|
||||||
|
// val sink: SinkRef[ByteString] =
|
||||||
|
// SinkRef.bulkTransferSource()
|
||||||
|
// .to(Sink.actorRef(probe, "<COMPLETE>"))
|
||||||
|
// .run()
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// sender() ! BulkSinkMsg(sink)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// -------------------------
|
||||||
|
|
||||||
|
final case class SourceMsg(dataSource: SourceRef[String])
|
||||||
|
final case class BulkSourceMsg(dataSource: SourceRef[ByteString])
|
||||||
|
final case class SinkMsg(dataSink: SinkRef[String])
|
||||||
|
final case class BulkSinkMsg(dataSink: SinkRef[ByteString])
|
||||||
|
|
||||||
|
def config(): Config = {
|
||||||
|
val address = SocketUtil.temporaryServerAddress()
|
||||||
|
ConfigFactory.parseString(
|
||||||
|
s"""
|
||||||
|
akka {
|
||||||
|
loglevel = INFO
|
||||||
|
|
||||||
|
actor {
|
||||||
|
provider = remote
|
||||||
|
serialize-messages = off
|
||||||
|
|
||||||
|
// serializers {
|
||||||
|
// akka-stream-ref-test = "akka.stream.remote.StreamRefsSpecSerializer"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// serialization-bindings {
|
||||||
|
// "akka.stream.remote.StreamRefsSpec$$SourceMsg" = akka-stream-ref-test
|
||||||
|
// "akka.stream.remote.StreamRefsSpec$$BulkSourceMsg" = akka-stream-ref-test
|
||||||
|
// "akka.stream.remote.StreamRefsSpec$$SinkMsg" = akka-stream-ref-test
|
||||||
|
// "akka.stream.remote.StreamRefsSpec$$BulkSinkMsg" = akka-stream-ref-test
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// serialization-identifiers {
|
||||||
|
// "akka.stream.remote.StreamRefsSpecSerializer" = 33
|
||||||
|
// }
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
remote.netty.tcp {
|
||||||
|
port = ${address.getPort}
|
||||||
|
hostname = "${address.getHostName}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
""").withFallback(ConfigFactory.load())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSender {
|
||||||
|
import StreamRefsSpec._
|
||||||
|
|
||||||
|
def this() {
|
||||||
|
this(StreamRefsSpec.config())
|
||||||
|
}
|
||||||
|
|
||||||
|
val remoteSystem = ActorSystem("RemoteSystem", StreamRefsSpec.config())
|
||||||
|
implicit val mat = ActorMaterializer()
|
||||||
|
|
||||||
|
override protected def beforeTermination(): Unit =
|
||||||
|
TestKit.shutdownActorSystem(remoteSystem)
|
||||||
|
|
||||||
|
val p = TestProbe()
|
||||||
|
|
||||||
|
// obtain the remoteActor ref via selection in order to use _real_ remoting in this test
|
||||||
|
val remoteActor = {
|
||||||
|
val it = remoteSystem.actorOf(DatasourceActor.props(p.ref), "remoteActor")
|
||||||
|
val remoteAddress = remoteSystem.asInstanceOf[ActorSystemImpl].provider.getDefaultAddress
|
||||||
|
system.actorSelection(it.path.toStringWithAddress(remoteAddress)) ! Identify("hi")
|
||||||
|
expectMsgType[ActorIdentity].ref.get
|
||||||
|
}
|
||||||
|
|
||||||
|
"A SourceRef" must {
|
||||||
|
|
||||||
|
"send messages via remoting" in {
|
||||||
|
remoteActor ! "give"
|
||||||
|
val sourceRef = expectMsgType[SourceRef[String]]
|
||||||
|
|
||||||
|
Source.fromGraph(sourceRef)
|
||||||
|
.log("RECEIVED")
|
||||||
|
.runWith(Sink.actorRef(p.ref, "<COMPLETE>"))
|
||||||
|
|
||||||
|
p.expectMsg("hello")
|
||||||
|
p.expectMsg("world")
|
||||||
|
p.expectMsg("<COMPLETE>")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
"A SinkRef" must {
|
||||||
|
|
||||||
|
"receive elements via remoting" in {
|
||||||
|
|
||||||
|
remoteActor ! "receive"
|
||||||
|
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
|
||||||
|
|
||||||
|
Source("hello" :: "world" :: Nil)
|
||||||
|
.to(remoteSink)
|
||||||
|
.run()
|
||||||
|
|
||||||
|
p.expectMsg("hello")
|
||||||
|
p.expectMsg("world")
|
||||||
|
p.expectMsg("<COMPLETE>")
|
||||||
|
}
|
||||||
|
|
||||||
|
"fail origin if remote Sink gets a failure" in {
|
||||||
|
|
||||||
|
remoteActor ! "receive"
|
||||||
|
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
|
||||||
|
|
||||||
|
val remoteFailureMessage = "Booom!"
|
||||||
|
Source.failed(new Exception(remoteFailureMessage))
|
||||||
|
.to(remoteSink)
|
||||||
|
.run()
|
||||||
|
|
||||||
|
val f = p.expectMsgType[akka.actor.Status.Failure]
|
||||||
|
f.cause.getMessage should ===(s"Remote Sink failed, reason: $remoteFailureMessage")
|
||||||
|
}
|
||||||
|
|
||||||
|
"receive hundreds of elements via remoting" in {
|
||||||
|
remoteActor ! "receive"
|
||||||
|
val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
|
||||||
|
|
||||||
|
val msgs = (1 to 100).toList.map(i ⇒ s"payload-$i")
|
||||||
|
|
||||||
|
Source(msgs)
|
||||||
|
.to(remoteSink)
|
||||||
|
.run()
|
||||||
|
|
||||||
|
msgs.foreach(t ⇒ p.expectMsg(t))
|
||||||
|
p.expectMsg("<COMPLETE>")
|
||||||
|
}
|
||||||
|
|
||||||
|
// "fail origin if remote Sink is stopped abruptly" in {
|
||||||
|
// val otherSystem = ActorSystem("OtherRemoteSystem", StreamRefsSpec.config())
|
||||||
|
//
|
||||||
|
// try {
|
||||||
|
// // obtain the remoteActor ref via selection in order to use _real_ remoting in this test
|
||||||
|
// val remoteActor = {
|
||||||
|
// val it = otherSystem.actorOf(DatasourceActor.props(p.ref), "remoteActor")
|
||||||
|
// val remoteAddress = otherSystem.asInstanceOf[ActorSystemImpl].provider.getDefaultAddress
|
||||||
|
// system.actorSelection(it.path.toStringWithAddress(remoteAddress)) ! Identify("hi")
|
||||||
|
// expectMsgType[ActorIdentity].ref.get
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// remoteActor ! "receive"
|
||||||
|
// val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]]
|
||||||
|
//
|
||||||
|
// val otherMat = ActorMaterializer()(otherSystem)
|
||||||
|
// Source.maybe[String] // not emitting anything
|
||||||
|
// .to(remoteSink)
|
||||||
|
// .run()(otherMat)
|
||||||
|
//
|
||||||
|
// // and the system crashes; which should cause abrupt termination in the stream
|
||||||
|
// Thread.sleep(300)
|
||||||
|
// otherMat.shutdown()
|
||||||
|
//
|
||||||
|
// val f = p.expectMsgType[akka.actor.Status.Failure]
|
||||||
|
// f.cause.getMessage should ===(s"Remote Sink failed, reason:")
|
||||||
|
// } finally TestKit.shutdownActorSystem(otherSystem)
|
||||||
|
// }
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
//
|
||||||
|
//class StreamRefsSpecSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer {
|
||||||
|
//
|
||||||
|
// lazy val ext = SerializationExtension(system)
|
||||||
|
//
|
||||||
|
// override def manifest(o: AnyRef): String = o match {
|
||||||
|
// case StreamRefsSpec.SinkMsg(_) ⇒ "si"
|
||||||
|
// case StreamRefsSpec.BulkSinkMsg(_) ⇒ "bsi"
|
||||||
|
// case StreamRefsSpec.SourceMsg(_) ⇒ "so"
|
||||||
|
// case StreamRefsSpec.BulkSourceMsg(_) ⇒ "bso"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// override def toBinary(o: AnyRef): Array[Byte] = {
|
||||||
|
// system.log.warning("Serializing: " + o)
|
||||||
|
// o match {
|
||||||
|
// case StreamRefsSpec.SinkMsg(s) ⇒ s.
|
||||||
|
// case StreamRefsSpec.BulkSinkMsg(s) ⇒ ext.serialize(s).get
|
||||||
|
// case StreamRefsSpec.SourceMsg(s) ⇒ ext.serialize(s).get
|
||||||
|
// case StreamRefsSpec.BulkSourceMsg(s) ⇒ ext.serialize(s).get
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = {
|
||||||
|
// system.log.warning("MANI: " + manifest)
|
||||||
|
// ???
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//}
|
||||||
File diff suppressed because it is too large
Load diff
56
akka-stream/src/main/protobuf/StreamRefContainers.proto
Normal file
56
akka-stream/src/main/protobuf/StreamRefContainers.proto
Normal file
|
|
@ -0,0 +1,56 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
|
||||||
|
option java_package = "akka.stream.remote";
|
||||||
|
option optimize_for = SPEED;
|
||||||
|
|
||||||
|
/*************************************************
|
||||||
|
StreamRefs (SourceRef / SinkRef) related formats
|
||||||
|
**************************************************/
|
||||||
|
|
||||||
|
// TODO make explicit types for SinkRef/SourceRef as they can carry initial settings (initial demand etc)
|
||||||
|
|
||||||
|
message SinkRef {
|
||||||
|
required ActorRef targetRef = 1;
|
||||||
|
optional int64 initialDemand = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SourceRef {
|
||||||
|
required ActorRef originRef = 1;
|
||||||
|
// FIXME: it's basically SinkRef since we just ommit the optional initial demand...
|
||||||
|
// FIXME: could it be that all those passable refs should be expressed internally as a StreamRef?
|
||||||
|
}
|
||||||
|
|
||||||
|
message ActorRef {
|
||||||
|
required string path = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Option {
|
||||||
|
optional Payload value = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Payload {
|
||||||
|
required bytes enclosedMessage = 1;
|
||||||
|
required int32 serializerId = 2;
|
||||||
|
optional bytes messageManifest = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
// stream refs protocol
|
||||||
|
|
||||||
|
message CumulativeDemand {
|
||||||
|
required int64 seqNr = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SequencedOnNext {
|
||||||
|
required int64 seqNr = 1;
|
||||||
|
required Payload payload = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RemoteSinkFailure {
|
||||||
|
optional bytes cause = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RemoteSinkCompleted {
|
||||||
|
required int64 seqNr = 1;
|
||||||
|
}
|
||||||
|
|
@ -34,13 +34,13 @@ akka {
|
||||||
# for cancelation (see `akka.stream.subscription-timeout.mode`)
|
# for cancelation (see `akka.stream.subscription-timeout.mode`)
|
||||||
timeout = 5s
|
timeout = 5s
|
||||||
}
|
}
|
||||||
|
|
||||||
# Enable additional troubleshooting logging at DEBUG log level
|
# Enable additional troubleshooting logging at DEBUG log level
|
||||||
debug-logging = off
|
debug-logging = off
|
||||||
|
|
||||||
# Maximum number of elements emitted in batch if downstream signals large demand
|
# Maximum number of elements emitted in batch if downstream signals large demand
|
||||||
output-burst-limit = 1000
|
output-burst-limit = 1000
|
||||||
|
|
||||||
# Enable automatic fusing of all graphs that are run. For short-lived streams
|
# Enable automatic fusing of all graphs that are run. For short-lived streams
|
||||||
# this may cause an initial runtime overhead, but most of the time fusing is
|
# this may cause an initial runtime overhead, but most of the time fusing is
|
||||||
# desirable since it reduces the number of Actors that are created.
|
# desirable since it reduces the number of Actors that are created.
|
||||||
|
|
@ -98,12 +98,57 @@ akka {
|
||||||
fixed-pool-size = 16
|
fixed-pool-size = 16
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# configure defaults for SourceRef and SinkRef
|
||||||
|
stream-refs {
|
||||||
|
# Default initial demand to be sent along with a SinkRef.
|
||||||
|
#
|
||||||
|
# The receiving side will be prepared to buffer as much as that many elements,
|
||||||
|
# and the sending side can assume to safely send as many elements without receiving
|
||||||
|
# an demand message before.
|
||||||
|
initial-demand = 4
|
||||||
|
|
||||||
|
# Demand is signalled by sending a cumulative demand message ("requesting messages until the n-th sequence number)
|
||||||
|
# Using a cumulative demand model allows us to re-deliver the demand message in case of message loss (which should
|
||||||
|
# be very rare in any case, yet possible -- mostly under connection break-down and re-establishment).
|
||||||
|
#
|
||||||
|
# The semantics of handling and updating the demand however are in-line with what Reactive Streams dictates.
|
||||||
|
#
|
||||||
|
# In normal operation, demand is signalled in response to arriving elements, however if no new elements arrive
|
||||||
|
# within `demand-redelivery-interval` a re-delivery of the demand will be triggered, assuming that it may have gotten lost.
|
||||||
|
demand-redelivery-interval = 500 ms
|
||||||
|
|
||||||
|
# Idle timeout, after which both sides of the stream-ref will terminate.
|
||||||
|
# Notice that demand-redelivery works as a keep-alive, and if a remote Sink keeps receiving the demand re-deliveries,
|
||||||
|
# it knows the other side is still alive and will not terminate. The other-side though will eventually decide, by its
|
||||||
|
# idle-timeout that "did not get any signal from the remote, over idle-timeout, so will terminate" and that termination
|
||||||
|
# would be signalled back to the remote Sink.
|
||||||
|
idle-timeout = 5 seconds
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e. when serving https connections)
|
# configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e. when serving https connections)
|
||||||
ssl-config {
|
ssl-config {
|
||||||
protocol = "TLSv1.2"
|
protocol = "TLSv1.2"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
actor {
|
||||||
|
|
||||||
|
serializers {
|
||||||
|
akka-stream-refs = "akka.stream.remote.serialization.StreamRefSerializer"
|
||||||
|
}
|
||||||
|
|
||||||
|
serialization-bindings {
|
||||||
|
"akka.stream.remote.scaladsl.SinkRef" = akka-stream-refs
|
||||||
|
"akka.stream.remote.scaladsl.SourceRef" = akka-stream-refs
|
||||||
|
"akka.stream.remote.StreamRefs$Protocol" = akka-stream-refs
|
||||||
|
}
|
||||||
|
|
||||||
|
serialization-identifiers {
|
||||||
|
"akka.stream.remote.serialization.StreamRefSerializer" = 30
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# ssl configuration
|
# ssl configuration
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,67 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote
|
||||||
|
|
||||||
|
import akka.actor.{ ActorRef, DeadLetterSuppression }
|
||||||
|
import akka.annotation.InternalApi
|
||||||
|
import akka.stream.impl.ReactiveStreamsCompliance
|
||||||
|
|
||||||
|
/** INTERNAL API: Protocol messages used by the various stream -ref implementations. */
|
||||||
|
@InternalApi
|
||||||
|
private[akka] object StreamRefs {
|
||||||
|
|
||||||
|
@InternalApi
|
||||||
|
sealed trait Protocol
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sequenced `Subscriber#onNext` equivalent.
|
||||||
|
* The receiving end of these messages MUST fail the stream if it observes gaps in the sequence,
|
||||||
|
* as these messages will not be re-delivered.
|
||||||
|
*
|
||||||
|
* Sequence numbers start from `0`.
|
||||||
|
*/
|
||||||
|
@InternalApi
|
||||||
|
final case class SequencedOnNext[T](seqNr: Long, payload: T) extends StreamRefs.Protocol {
|
||||||
|
if (payload == null) throw ReactiveStreamsCompliance.elementMustNotBeNullException
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Sent to a the receiver side of a SinkRef, once the sending side of the SinkRef gets signalled a Failure. */
|
||||||
|
@InternalApi
|
||||||
|
final case class RemoteSinkFailure(msg: String) extends StreamRefs.Protocol
|
||||||
|
|
||||||
|
/** Sent to a the receiver side of a SinkRef, once the sending side of the SinkRef gets signalled a completion. */
|
||||||
|
@InternalApi
|
||||||
|
final case class RemoteSinkCompleted(seqNr: Long) extends StreamRefs.Protocol
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cumulative demand, equivalent to sequence numbering all events in a stream. *
|
||||||
|
* This message may be re-delivered.
|
||||||
|
*/
|
||||||
|
@InternalApi
|
||||||
|
final case class CumulativeDemand(seqNr: Long) extends StreamRefs.Protocol with DeadLetterSuppression {
|
||||||
|
if (seqNr <= 0) throw ReactiveStreamsCompliance.numberOfElementsInRequestMustBePositiveException
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- exceptions ---
|
||||||
|
|
||||||
|
final case class RemoteStreamRefActorTerminatedException(msg: String) extends RuntimeException(msg)
|
||||||
|
final case class RemoteStreamRefFailedException(msg: String) extends RuntimeException(msg)
|
||||||
|
final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: Long, msg: String)
|
||||||
|
extends IllegalStateException(s"$msg (expected: $expectedSeqNr, got: $gotSeqNr)")
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stream refs establish a connection between a local and remote actor, representing the origin and remote sides
|
||||||
|
* of a stream. Each such actor refers to the other side as its "partner". We make sure that no other actor than
|
||||||
|
* the initial partner can send demand/messages to the other side accidentally.
|
||||||
|
*
|
||||||
|
* This exception is thrown when a message is recived from a non-partner actor,
|
||||||
|
* which could mean a bug or some actively malicient behaviour from the other side.
|
||||||
|
*
|
||||||
|
* This is not meant as a security feature, but rather as plain sanity-check.
|
||||||
|
*/
|
||||||
|
final case class InvalidPartnerActorException(expectedRef: ActorRef, gotRef: ActorRef, msg: String)
|
||||||
|
extends IllegalStateException(s"$msg (expected: $expectedRef, got: $gotRef)")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
@ -0,0 +1,61 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote.impl
|
||||||
|
|
||||||
|
import akka.actor.{ Actor, ActorRef, ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props }
|
||||||
|
import akka.stream.ActorMaterializerHelper
|
||||||
|
import akka.stream.impl.SeqActorName
|
||||||
|
import akka.stream.remote.impl.StreamRefsMasterActor.AllocatePusherToRemoteSink
|
||||||
|
import akka.stream.remote.scaladsl.{ SinkRef, StreamRefSettings }
|
||||||
|
|
||||||
|
object StreamRefsMaster extends ExtensionId[StreamRefsMaster] with ExtensionIdProvider {
|
||||||
|
|
||||||
|
override def createExtension(system: ExtendedActorSystem): StreamRefsMaster =
|
||||||
|
new StreamRefsMaster(system)
|
||||||
|
|
||||||
|
override def lookup(): StreamRefsMaster.type = this
|
||||||
|
|
||||||
|
override def get(system: ActorSystem): StreamRefsMaster = super.get(system)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** INTERNAL API */
|
||||||
|
private[stream] final class StreamRefsMaster(system: ExtendedActorSystem) extends Extension {
|
||||||
|
|
||||||
|
val settings: StreamRefSettings = new StreamRefSettings(system.settings.config)
|
||||||
|
|
||||||
|
private[this] val sourceRefOriginSinkNames = SeqActorName("SourceRefOriginSink") // "local origin"
|
||||||
|
private[this] val sourceRefNames = SeqActorName("SourceRef") // "remote receiver"
|
||||||
|
|
||||||
|
private[this] val sinkRefTargetSourceNames = SeqActorName("SinkRefTargetSource") // "local target"
|
||||||
|
private[this] val sinkRefNames = SeqActorName("SinkRef") // "remote sender"
|
||||||
|
|
||||||
|
// TODO do we need it? perhaps for reaping?
|
||||||
|
// system.systemActorOf(StreamRefsMasterActor.props(), "streamRefsMaster")
|
||||||
|
|
||||||
|
def nextSinkRefTargetSourceName(): String =
|
||||||
|
sinkRefTargetSourceNames.next()
|
||||||
|
|
||||||
|
def nextSinkRefName(): String =
|
||||||
|
sinkRefNames.next()
|
||||||
|
|
||||||
|
def nextSourceRefOriginSinkName(): String =
|
||||||
|
sourceRefOriginSinkNames.next()
|
||||||
|
|
||||||
|
def nextSourceRefName(): String =
|
||||||
|
sourceRefNames.next()
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
object StreamRefsMasterActor {
|
||||||
|
def props(): Props = Props(new StreamRefsMasterActor())
|
||||||
|
|
||||||
|
final case class AllocatePusherToRemoteSink(stageRef: ActorRef)
|
||||||
|
}
|
||||||
|
|
||||||
|
class StreamRefsMasterActor extends Actor {
|
||||||
|
override def receive: Receive = {
|
||||||
|
case AllocatePusherToRemoteSink(stageRef) ⇒
|
||||||
|
// context.actorOf()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,274 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote.scaladsl
|
||||||
|
|
||||||
|
import java.util.Queue
|
||||||
|
|
||||||
|
import akka.actor.{ ActorRef, Terminated }
|
||||||
|
import akka.event.Logging
|
||||||
|
import akka.stream._
|
||||||
|
import akka.stream.actor.{ MaxInFlightRequestStrategy, RequestStrategy, WatermarkRequestStrategy }
|
||||||
|
import akka.stream.impl.FixedSizeBuffer
|
||||||
|
import akka.stream.remote.StreamRefs
|
||||||
|
import akka.stream.remote.impl.StreamRefsMaster
|
||||||
|
import akka.stream.scaladsl.Source
|
||||||
|
import akka.stream.stage._
|
||||||
|
import akka.util.ByteString
|
||||||
|
|
||||||
|
import scala.concurrent.{ Future, Promise }
|
||||||
|
|
||||||
|
object SinkRef {
|
||||||
|
def source[T](): Source[T, Future[SinkRef[T]]] =
|
||||||
|
Source.fromGraph(new SinkRefTargetSource[T]()) // TODO settings?
|
||||||
|
|
||||||
|
def bulkTransferSource(port: Int = -1): Source[ByteString, SinkRef[ByteString]] = {
|
||||||
|
???
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * This stage can only handle a single "sender" (it does not merge values);
 * The first that pushes is assumed the one we are to trust
 *
 * Materializes a Future that completes with the [[SinkRef]] to be handed to the remote side.
 */
final class SinkRefTargetSource[T]() extends GraphStageWithMaterializedValue[SourceShape[T], Future[SinkRef[T]]] {
  val out: Outlet[T] = Outlet[T](s"${Logging.simpleName(getClass)}.out")
  override def shape = SourceShape.of(out)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
    val promise = Promise[SinkRef[T]]()

    val logic = new TimerGraphStageLogic(shape) with StageLogging with OutHandler {
      private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
      private[this] lazy val settings = streamRefsMaster.settings

      // stage actor that receives protocol messages from the remote side; created in preStart
      private[this] var self: GraphStageLogic.StageActor = _
      private[this] lazy val selfActorName = streamRefsMaster.nextSinkRefTargetSourceName()
      private[this] implicit def selfSender: ActorRef = self.ref

      // demand management ---
      private val highDemandWatermark = 16

      // next sequence number we expect from the remote sender
      private var expectingSeqNr: Long = 0L
      private var localCumulativeDemand: Long = 0L // initialized in preStart with settings.initialDemand

      // holds elements that arrived while downstream had not pulled yet
      private val receiveBuffer = FixedSizeBuffer[T](highDemandWatermark)

      // TODO configurable?
      // Request strategies talk in terms of Request(n), which we need to translate to cumulative demand
      // TODO the MaxInFlightRequestStrategy is likely better for this use case, yet was a bit weird to use so this one for now
      private val requestStrategy: RequestStrategy = WatermarkRequestStrategy(highWatermark = highDemandWatermark)
      // end of demand management ---

      // trusted remote partner; fixed to the first sender observed (see observeAndValidateSender)
      private var remotePartner: ActorRef = _

      override def preStart(): Unit = {
        localCumulativeDemand = settings.initialDemand.toLong

        self = getStageActor(initialReceive, name = selfActorName)
        log.debug("Allocated receiver: {}", self.ref)

        // hand out the ref pointing at our stage actor, pre-loaded with the initial demand
        promise.success(new SinkRef(self.ref, settings.initialDemand))
      }

      override def onPull(): Unit = {
        tryPush()
        triggerCumulativeDemand()
      }

      // Sends an updated cumulative demand value to the remote partner when the
      // request strategy says more demand should be signalled.
      def triggerCumulativeDemand(): Unit =
        if (remotePartner ne null) {
          val remainingRequested = java.lang.Long.min(highDemandWatermark, localCumulativeDemand - expectingSeqNr).toInt
          val addDemand = requestStrategy.requestDemand(remainingRequested)

          // only if demand has increased we shoot it right away
          // otherwise it's the same demand level, so it'd be triggered via redelivery anyway
          if (addDemand > 0) {
            localCumulativeDemand += addDemand
            val demand = StreamRefs.CumulativeDemand(localCumulativeDemand)

            log.debug("[{}] Demanding until [{}] (+{})", selfActorName, localCumulativeDemand, addDemand)
            remotePartner ! demand
            scheduleDemandRedelivery()
          }
        }

      val DemandRedeliveryTimerKey = "DemandRedeliveryTimerKey"

      // demand messages may be lost over remoting, so re-send the current level periodically
      def scheduleDemandRedelivery() = scheduleOnce(DemandRedeliveryTimerKey, settings.demandRedeliveryInterval)

      override protected def onTimer(timerKey: Any): Unit = timerKey match {
        case DemandRedeliveryTimerKey ⇒
          log.debug("[{}] Scheduled re-delivery of demand until [{}]", selfActorName, localCumulativeDemand)
          remotePartner ! StreamRefs.CumulativeDemand(localCumulativeDemand)
          scheduleDemandRedelivery()
      }

      // Handles the stream-ref protocol messages arriving from the remote sending side.
      lazy val initialReceive: ((ActorRef, Any)) ⇒ Unit = {
        case (sender, msg @ StreamRefs.SequencedOnNext(seqNr, payload)) ⇒
          observeAndValidateSender(sender, "Illegal sender in SequencedOnNext")
          observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in SequencedOnNext")
          log.debug("Received seq {} from {}", msg, sender)

          triggerCumulativeDemand()
          tryPush(payload)

        case (sender, StreamRefs.RemoteSinkCompleted(seqNr)) ⇒
          observeAndValidateSender(sender, "Illegal sender in RemoteSinkCompleted")
          observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in RemoteSinkCompleted")
          log.debug("The remote Sink has completed, completing this source as well...")

          self.unwatch(sender)
          completeStage()

        case (sender, StreamRefs.RemoteSinkFailure(reason)) ⇒
          observeAndValidateSender(sender, "Illegal sender in RemoteSinkFailure")
          log.debug("The remote Sink has failed, failing (reason: {})", reason)

          self.unwatch(sender)
          failStage(StreamRefs.RemoteStreamRefActorTerminatedException(s"Remote Sink failed, reason: $reason"))
      }

      // push a buffered element if downstream is ready and we have one
      def tryPush(): Unit =
        if (isAvailable(out) && receiveBuffer.nonEmpty) {
          val elem = receiveBuffer.dequeue()
          log.debug(s"PUSHING SIGNALED ${elem} (capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
          push(out, elem)
        }

      // push the freshly received payload, preserving FIFO order with the buffer
      def tryPush(payload: Any): Unit =
        if (isAvailable(out)) {
          if (receiveBuffer.nonEmpty) {
            // older buffered elements go first; the new payload is enqueued behind them
            val elem = receiveBuffer.dequeue()
            push(out, elem)
            receiveBuffer.enqueue(payload.asInstanceOf[T])
            log.debug(s"PUSHING SIGNALED ${elem} BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
          } else {
            push(out, payload.asInstanceOf[T])
            log.debug(s"PUSHING DIRECTLY ${payload}")
          }
        } else {
          receiveBuffer.enqueue(payload.asInstanceOf[T])
          log.debug("PUSHING BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
        }

      // Records the first sender as the trusted partner; rejects any other sender afterwards.
      @throws[StreamRefs.InvalidPartnerActorException]
      def observeAndValidateSender(sender: ActorRef, msg: String): Unit =
        if (remotePartner == null) {
          log.debug("Received first message from {}, assuming it to be the remote partner for this stage", sender)
          remotePartner = sender
          self.watch(sender)
        } else if (sender != remotePartner) {
          throw StreamRefs.InvalidPartnerActorException(sender, remotePartner, msg)
        }

      // Validates strictly monotonic, gapless sequence numbers.
      @throws[StreamRefs.InvalidSequenceNumberException]
      def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
        if (isInvalidSequenceNr(seqNr)) {
          throw StreamRefs.InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)
        } else {
          expectingSeqNr += 1
        }

      def isInvalidSequenceNr(seqNr: Long): Boolean =
        seqNr != expectingSeqNr

      setHandler(out, this)
    }

    (logic, promise.future) // FIXME we'd want to expose just the ref!
  }

  override def toString: String =
    s"${Logging.simpleName(getClass)}()"
}
|
||||||
|
|
||||||
|
/**
 * The "handed out" side of a SinkRef. It powers a Source on the other side.
 * TODO naming!??!?!!?!?!?!
 *
 * Do not create this instance directly, but use `SinkRef` factories, to run/setup its targetRef
 */
final class SinkRef[In] private[akka] ( // TODO is it more of a SourceRefSink?
  private[akka] val targetRef: ActorRef,
  private[akka] val initialDemand: Long
) extends GraphStage[SinkShape[In]] with Serializable { stage ⇒
  import akka.stream.remote.StreamRefs._

  val in = Inlet[In](s"${Logging.simpleName(getClass)}($targetRef).in")
  override def shape: SinkShape[In] = SinkShape.of(in)

  override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with StageLogging with InHandler {

    private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
    private[this] lazy val selfActorName = streamRefsMaster.nextSinkRefName()

    // we assume that there is at least SOME buffer space
    private[this] var remoteCumulativeDemandReceived = initialDemand

    // FIXME this one will be sent over remoting so we have to be able to make that work
    // how much demand we have consumed; doubles as the sequence number attached to elements
    private[this] var remoteCumulativeDemandConsumed = 0L
    private[this] var self: GraphStageLogic.StageActor = _
    implicit def selfSender: ActorRef = self.ref

    override def preStart(): Unit = {
      self = getStageActor(initialReceive, selfActorName)
      // watch the remote receiver so its termination fails this stream (see Terminated below)
      self.watch(targetRef)

      log.debug("Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}", targetRef, self)

      pull(in)
    }

    // Handles demand messages and termination notifications from the remote receiving side.
    lazy val initialReceive: ((ActorRef, Any)) ⇒ Unit = {
      case (_, Terminated(`targetRef`)) ⇒
        failStage(failRemoteTerminated())

      case (sender, CumulativeDemand(d)) ⇒
        validatePartnerRef(sender)

        // cumulative demand is monotonic; ignore stale (re-delivered) values
        if (remoteCumulativeDemandReceived < d) {
          remoteCumulativeDemandReceived = d
          log.debug("Received cumulative demand [{}], consumable demand: [{}]", CumulativeDemand(d), remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
        }
        tryPull()
    }

    override def onPush(): Unit = {
      val elem = grabSequenced(in)
      targetRef ! elem
      log.debug("Sending sequenced: {} to {}", elem, targetRef)
      tryPull()
    }

    // pull upstream only while un-consumed remote demand remains
    private def tryPull() =
      if (remoteCumulativeDemandConsumed < remoteCumulativeDemandReceived && !hasBeenPulled(in))
        pull(in)

    // grab the element and attach the next sequence number, consuming one unit of demand
    private def grabSequenced[T](in: Inlet[T]): SequencedOnNext[T] = {
      val onNext = SequencedOnNext(remoteCumulativeDemandConsumed, grab(in))
      remoteCumulativeDemandConsumed += 1
      onNext
    }

    override def onUpstreamFailure(ex: Throwable): Unit = {
      targetRef ! StreamRefs.RemoteSinkFailure(ex.getMessage) // TODO yes / no? At least the message I guess
      self.unwatch(targetRef)
      super.onUpstreamFailure(ex)
    }

    override def onUpstreamFinish(): Unit = {
      targetRef ! StreamRefs.RemoteSinkCompleted(remoteCumulativeDemandConsumed)
      self.unwatch(targetRef)
      super.onUpstreamFinish()
    }

    setHandler(in, this)
  }

  // Only the targetRef handed to this SinkRef is trusted to signal demand;
  // use the protocol's dedicated exception rather than a generic RuntimeException.
  private def validatePartnerRef(ref: ActorRef) = {
    if (ref != targetRef)
      throw InvalidPartnerActorException(ref, targetRef, "Got demand from an unexpected actor; only the target partner is trusted")
  }

  private def failRemoteTerminated() = {
    RemoteStreamRefActorTerminatedException(s"Remote target receiver of data ${targetRef} terminated. Local stream terminating, message loss (on remote side) may have happened.")
  }

  override def toString = s"${Logging.simpleName(getClass)}($targetRef)"
}
|
||||||
|
|
@ -0,0 +1,289 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote.scaladsl
|
||||||
|
|
||||||
|
import akka.NotUsed
|
||||||
|
import akka.actor.ActorRef
|
||||||
|
import akka.event.Logging
|
||||||
|
import akka.stream._
|
||||||
|
import akka.stream.actor.{ RequestStrategy, WatermarkRequestStrategy }
|
||||||
|
import akka.stream.impl.FixedSizeBuffer
|
||||||
|
import akka.stream.remote.StreamRefs
|
||||||
|
import akka.stream.remote.StreamRefs.{ CumulativeDemand, SequencedOnNext }
|
||||||
|
import akka.stream.remote.impl.StreamRefsMaster
|
||||||
|
import akka.stream.scaladsl.{ FlowOps, Sink, Source }
|
||||||
|
import akka.stream.stage._
|
||||||
|
import akka.util.ByteString
|
||||||
|
|
||||||
|
import scala.concurrent.{ Future, Promise }
|
||||||
|
|
||||||
|
// FIXME IMPLEMENT THIS
|
||||||
|
object SourceRef {
  /**
   * Creates a Sink that materializes a [[SourceRef]] (via a Future) which can be handed
   * to a remote system; elements written to this sink are forwarded to whichever remote
   * stage consumes from that ref (see [[SourceRefOriginSink]]).
   */
  def sink[T](): Graph[SinkShape[T], Future[SourceRef[T]]] =
    Sink.fromGraph(new SourceRefOriginSink[T]())

  // Bulk (ByteString) transfer variant — not implemented yet; calling this throws NotImplementedError.
  def bulkTransfer[T](): Graph[SinkShape[ByteString], SourceRef[ByteString]] = ???
}
|
||||||
|
|
||||||
|
/**
 * Local origin of a [[SourceRef]]: consumes the local stream and sends each element,
 * tagged with a sequence number, to the remote partner that signals demand.
 * Materializes a Future that completes with the [[SourceRef]] to hand to the remote side.
 */
final class SourceRefOriginSink[T]() extends GraphStageWithMaterializedValue[SinkShape[T], Future[SourceRef[T]]] {
  val in: Inlet[T] = Inlet[T](s"${Logging.simpleName(getClass)}.in")
  override def shape: SinkShape[T] = SinkShape.of(in)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[SourceRef[T]]) = {
    val promise = Promise[SourceRef[T]]

    val logic = new TimerGraphStageLogic(shape) with InHandler with StageLogging {
      private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
      // NOTE(review): `settings` appears unused in this stage — confirm before removing
      private[this] lazy val settings = streamRefsMaster.settings

      // trusted remote partner; fixed to the first sender observed (see observeAndValidateSender)
      private[this] var remotePartner: ActorRef = _

      // stage actor that receives demand messages from the remote side; created in preStart
      private[this] var self: GraphStageLogic.StageActor = _
      // NOTE(review): reuses the SinkRefTargetSource name counter — presumably intentional for the WIP copy
      private[this] lazy val selfActorName = streamRefsMaster.nextSinkRefTargetSourceName()
      private[this] implicit def selfSender: ActorRef = self.ref

      // demand management ---
      // highest cumulative demand the remote side has announced
      private var remoteCumulativeDemandReceived: Long = 0L
      // how much of that demand we have used; doubles as the element sequence number
      private var remoteCumulativeDemandConsumed: Long = 0L
      // end of demand management ---

      override def preStart(): Unit = {
        self = getStageActor(initialReceive, selfActorName)
        // NOTE(review): warning level looks like leftover debug logging
        log.warning("Allocated receiver: {}", self.ref)

        // hand out the ref pointing at our stage actor
        promise.success(new SourceRef(self.ref))
      }

      // Handles cumulative-demand messages arriving from the remote consuming side.
      lazy val initialReceive: ((ActorRef, Any)) ⇒ Unit = {
        case (sender, msg @ StreamRefs.CumulativeDemand(demand)) ⇒
          observeAndValidateSender(sender, "Illegal sender in CumulativeDemand")

          // cumulative demand is monotonic; ignore stale (re-delivered) values
          if (demand > remoteCumulativeDemandReceived) {
            remoteCumulativeDemandReceived = demand
            log.warning("Received cumulative demand [{}], consumable demand: [{}]", msg,
              remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
          }

          tryPull()
      }

      // pull upstream only while un-consumed remote demand remains
      def tryPull(): Unit =
        if (remoteCumulativeDemandConsumed < remoteCumulativeDemandReceived && !hasBeenPulled(in))
          pull(in)

      // grab the element and attach the next sequence number, consuming one unit of demand
      private def grabSequenced(in: Inlet[T]): SequencedOnNext[T] = {
        val onNext = SequencedOnNext(remoteCumulativeDemandConsumed, grab(in))
        remoteCumulativeDemandConsumed += 1
        onNext
      }

      override def onPush(): Unit = {
        val elem = grabSequenced(in)
        remotePartner ! elem
        log.warning("Sending sequenced: {} to {}", elem, remotePartner)
        tryPull()
      }

      // Records the first sender as the trusted partner; rejects any other sender afterwards.
      @throws[StreamRefs.InvalidPartnerActorException]
      def observeAndValidateSender(sender: ActorRef, msg: String): Unit =
        if (remotePartner == null) {
          log.debug("Received first message from {}, assuming it to be the remote partner for this stage", sender)
          remotePartner = sender
          self.watch(sender)
        } else if (sender != remotePartner) {
          throw StreamRefs.InvalidPartnerActorException(sender, remotePartner, msg)
        }

      //      @throws[StreamRefs.InvalidSequenceNumberException]
      //      def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
      //        if (isInvalidSequenceNr(seqNr)) {
      //          throw StreamRefs.InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)
      //        } else {
      //          expectingSeqNr += 1
      //        }
      //      def isInvalidSequenceNr(seqNr: Long): Boolean =
      //        seqNr != expectingSeqNr

      override def onUpstreamFailure(ex: Throwable): Unit = {
        remotePartner ! StreamRefs.RemoteSinkFailure(ex.getMessage) // TODO yes / no? At least the message I guess
        self.unwatch(remotePartner)
        super.onUpstreamFailure(ex)
      }

      override def onUpstreamFinish(): Unit = {
        remotePartner ! StreamRefs.RemoteSinkCompleted(remoteCumulativeDemandConsumed)
        self.unwatch(remotePartner)
        super.onUpstreamFinish()
      }

      setHandler(in, this)
    }

    (logic, promise.future)
  }

}
|
||||||
|
|
||||||
|
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
|
||||||
|
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
|
||||||
|
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
|
||||||
|
///// ------------------------------------ FIXME THIS IS A VERBATIM COPY -----------------------------------
|
||||||
|
/**
 * This stage can only handle a single "sender" (it does not merge values);
 * The first that pushes is assumed the one we are to trust
 */
// FIXME this is basically SinkRefTargetSource
final class SourceRef[T](private[akka] val originRef: ActorRef) extends GraphStageWithMaterializedValue[SourceShape[T], Future[SinkRef[T]]] {
  val out: Outlet[T] = Outlet[T](s"${Logging.simpleName(getClass)}.out")
  override def shape = SourceShape.of(out)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = {
    val promise = Promise[SinkRef[T]]()

    val logic = new TimerGraphStageLogic(shape) with StageLogging with OutHandler {
      private[this] lazy val streamRefsMaster = StreamRefsMaster(ActorMaterializerHelper.downcast(materializer).system)
      private[this] lazy val settings = streamRefsMaster.settings

      // stage actor that receives protocol messages from the remote side; created in preStart
      private[this] var self: GraphStageLogic.StageActor = _
      private[this] lazy val selfActorName = streamRefsMaster.nextSinkRefTargetSourceName()
      private[this] implicit def selfSender: ActorRef = self.ref

      // demand management ---
      private val highDemandWatermark = 16

      // next sequence number we expect from the remote sender
      private var expectingSeqNr: Long = 0L
      private var localCumulativeDemand: Long = 0L // initialized in preStart with settings.initialDemand

      // holds elements that arrived while downstream had not pulled yet
      private val receiveBuffer = FixedSizeBuffer[T](highDemandWatermark)

      // TODO configurable?
      // Request strategies talk in terms of Request(n), which we need to translate to cumulative demand
      // TODO the MaxInFlightRequestStrategy is likely better for this use case, yet was a bit weird to use so this one for now
      private val requestStrategy: RequestStrategy = WatermarkRequestStrategy(highWatermark = highDemandWatermark)
      // end of demand management ---

      // TODO we could basically use the other impl... and just pass null as originRef since it'd be obtained from other side...
      private var remotePartner: ActorRef = originRef

      override def preStart(): Unit = {
        localCumulativeDemand = settings.initialDemand.toLong

        self = getStageActor(initialReceive, name = selfActorName)
        log.debug("Allocated receiver: {}", self.ref)

        promise.success(new SinkRef(self.ref, settings.initialDemand))
      }

      override def onPull(): Unit = {
        tryPush()
        triggerCumulativeDemand()
      }

      // Sends an updated cumulative demand value to the remote partner when the
      // request strategy says more demand should be signalled.
      def triggerCumulativeDemand(): Unit =
        if (remotePartner ne null) {
          val remainingRequested = java.lang.Long.min(highDemandWatermark, localCumulativeDemand - expectingSeqNr).toInt
          val addDemand = requestStrategy.requestDemand(remainingRequested)

          // only if demand has increased we shoot it right away
          // otherwise it's the same demand level, so it'd be triggered via redelivery anyway
          if (addDemand > 0) {
            localCumulativeDemand += addDemand
            val demand = StreamRefs.CumulativeDemand(localCumulativeDemand)

            log.debug("[{}] Demanding until [{}] (+{})", selfActorName, localCumulativeDemand, addDemand)
            remotePartner ! demand
            scheduleDemandRedelivery()
          }
        }

      val DemandRedeliveryTimerKey = "DemandRedeliveryTimerKey"

      // demand messages may be lost over remoting, so re-send the current level periodically
      def scheduleDemandRedelivery() = scheduleOnce(DemandRedeliveryTimerKey, settings.demandRedeliveryInterval)

      override protected def onTimer(timerKey: Any): Unit = timerKey match {
        case DemandRedeliveryTimerKey ⇒
          log.debug("[{}] Scheduled re-delivery of demand until [{}]", selfActorName, localCumulativeDemand)
          remotePartner ! StreamRefs.CumulativeDemand(localCumulativeDemand)
          scheduleDemandRedelivery()
      }

      // Handles the stream-ref protocol messages arriving from the remote sending side.
      lazy val initialReceive: ((ActorRef, Any)) ⇒ Unit = {
        case (sender, msg @ StreamRefs.SequencedOnNext(seqNr, payload)) ⇒
          observeAndValidateSender(sender, "Illegal sender in SequencedOnNext")
          observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in SequencedOnNext")
          log.debug("Received seq {} from {}", msg, sender)

          triggerCumulativeDemand()
          tryPush(payload)

        case (sender, StreamRefs.RemoteSinkCompleted(seqNr)) ⇒
          observeAndValidateSender(sender, "Illegal sender in RemoteSinkCompleted")
          observeAndValidateSequenceNr(seqNr, "Illegal sequence nr in RemoteSinkCompleted")
          log.debug("The remote Sink has completed, completing this source as well...")

          self.unwatch(sender)
          completeStage()

        case (sender, StreamRefs.RemoteSinkFailure(reason)) ⇒
          observeAndValidateSender(sender, "Illegal sender in RemoteSinkFailure")
          log.debug("The remote Sink has failed, failing (reason: {})", reason)

          self.unwatch(sender)
          failStage(StreamRefs.RemoteStreamRefActorTerminatedException(s"Remote Sink failed, reason: $reason"))
      }

      // push a buffered element if downstream is ready and we have one
      def tryPush(): Unit =
        if (isAvailable(out) && receiveBuffer.nonEmpty) {
          val elem = receiveBuffer.dequeue()
          log.debug(s"PUSHING SIGNALED ${elem} (capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
          push(out, elem)
        }

      // push the freshly received payload, preserving FIFO order with the buffer
      def tryPush(payload: Any): Unit =
        if (isAvailable(out)) {
          if (receiveBuffer.nonEmpty) {
            // older buffered elements go first; the new payload is enqueued behind them
            val elem = receiveBuffer.dequeue()
            push(out, elem)
            receiveBuffer.enqueue(payload.asInstanceOf[T])
            log.debug(s"PUSHING SIGNALED ${elem} BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
          } else {
            push(out, payload.asInstanceOf[T])
            log.debug(s"PUSHING DIRECTLY ${payload}")
          }
        } else {
          receiveBuffer.enqueue(payload.asInstanceOf[T])
          log.debug("PUSHING BUFFERING payload" + payload + s"(capacity: ${receiveBuffer.used}/${receiveBuffer.capacity})")
        }

      // Records the first sender as the trusted partner; rejects any other sender afterwards.
      @throws[StreamRefs.InvalidPartnerActorException]
      def observeAndValidateSender(sender: ActorRef, msg: String): Unit =
        if (remotePartner == null) {
          log.debug("Received first message from {}, assuming it to be the remote partner for this stage", sender)
          remotePartner = sender
          self.watch(sender)
        } else if (sender != remotePartner) {
          throw StreamRefs.InvalidPartnerActorException(sender, remotePartner, msg)
        }

      // Validates strictly monotonic, gapless sequence numbers.
      @throws[StreamRefs.InvalidSequenceNumberException]
      def observeAndValidateSequenceNr(seqNr: Long, msg: String): Unit =
        if (isInvalidSequenceNr(seqNr)) {
          throw StreamRefs.InvalidSequenceNumberException(expectingSeqNr, seqNr, msg)
        } else {
          expectingSeqNr += 1
        }

      def isInvalidSequenceNr(seqNr: Long): Boolean =
        seqNr != expectingSeqNr

      setHandler(out, this)
    }

    (logic, promise.future) // FIXME we'd want to expose just the ref!
  }

  override def toString: String =
    s"${Logging.simpleName(getClass)}($originRef)"
}
|
||||||
|
|
||||||
|
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
|
||||||
|
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
|
||||||
|
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
|
||||||
|
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
|
||||||
|
///// ------------------------------------ FIXME END OF THIS IS A VERBATIM COPY ----------------------------
|
||||||
|
|
@ -0,0 +1,19 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote.scaladsl
|
||||||
|
|
||||||
|
import java.util.concurrent.TimeUnit
|
||||||
|
|
||||||
|
import scala.concurrent.duration._
|
||||||
|
import com.typesafe.config.Config
|
||||||
|
|
||||||
|
/**
 * Settings for stream refs, read from the `akka.stream.stream-refs` section of the
 * given configuration.
 *
 * Public vals carry explicit type annotations so the exposed API does not silently
 * change with config-accessor return types.
 */
final class StreamRefSettings(config: Config) {
  private val c = config.getConfig("akka.stream.stream-refs")

  /** Demand eagerly attached to a freshly materialized SinkRef, before any explicit demand arrives. */
  val initialDemand: Int = c.getInt("initial-demand")

  /** How often the current cumulative demand is re-sent, to compensate for possible message loss. */
  val demandRedeliveryInterval: FiniteDuration = c.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis

  /** Idle timeout for stream refs. (Usage not visible in this file — confirm semantics at call sites.) */
  val idleTimeout: FiniteDuration = c.getDuration("idle-timeout", TimeUnit.MILLISECONDS).millis
}
|
||||||
|
|
@ -0,0 +1,160 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
|
||||||
|
*/
|
||||||
|
package akka.stream.remote.serialization
|
||||||
|
|
||||||
|
import akka.actor.ExtendedActorSystem
|
||||||
|
import akka.protobuf.ByteString
|
||||||
|
import akka.serialization.{ BaseSerializer, Serialization, SerializationExtension, SerializerWithStringManifest }
|
||||||
|
import akka.stream.remote.scaladsl.{ SinkRef, SourceRef }
|
||||||
|
import akka.stream.remote.{ StreamRefContainers, StreamRefs }
|
||||||
|
|
||||||
|
/**
 * Serializer for the stream-refs wire protocol messages and the SinkRef / SourceRef
 * handles themselves.
 *
 * Payloads carried by `SequencedOnNext` are delegated to whatever serializer is bound
 * to the payload's own type; everything else is protobuf (StreamRefContainers).
 */
final class StreamRefSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest
  with BaseSerializer {

  private[this] lazy val serialization = SerializationExtension(system)

  // manifests: protocol messages
  private[this] val SequencedOnNextManifest = "A"
  private[this] val CumulativeDemandManifest = "B"
  private[this] val RemoteSinkFailureManifest = "C"
  private[this] val RemoteSinkCompletedManifest = "D"
  // manifests: stream ref handles
  private[this] val SourceRefManifest = "E"
  private[this] val SinkRefManifest = "F"

  override def manifest(o: AnyRef): String = o match {
    // protocol
    case _: StreamRefs.SequencedOnNext[_] ⇒ SequencedOnNextManifest
    case _: StreamRefs.CumulativeDemand ⇒ CumulativeDemandManifest
    case _: StreamRefs.RemoteSinkFailure ⇒ RemoteSinkFailureManifest
    case _: StreamRefs.RemoteSinkCompleted ⇒ RemoteSinkCompletedManifest
    // refs
    case _: SourceRef[_] ⇒ SourceRefManifest
    case _: SinkRef[_] ⇒ SinkRefManifest
    // fail descriptively instead of with an opaque MatchError
    case unexpected ⇒ throw new IllegalArgumentException(
      s"Unsupported object of type [${unexpected.getClass.getName}] for serializer [${getClass.getName}]")
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    // protocol
    case o: StreamRefs.SequencedOnNext[_] ⇒ serializeSequencedOnNext(o).toByteArray
    case d: StreamRefs.CumulativeDemand ⇒ serializeCumulativeDemand(d).toByteArray
    case d: StreamRefs.RemoteSinkFailure ⇒ serializeRemoteSinkFailure(d).toByteArray
    case d: StreamRefs.RemoteSinkCompleted ⇒ serializeRemoteSinkCompleted(d).toByteArray
    // refs
    case ref: SinkRef[_] ⇒ serializeSinkRef(ref).toByteArray
    case ref: SourceRef[_] ⇒ serializeSourceRef(ref).toByteArray
    // fail descriptively instead of with an opaque MatchError
    case unexpected ⇒ throw new IllegalArgumentException(
      s"Unsupported object of type [${unexpected.getClass.getName}] for serializer [${getClass.getName}]")
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
    // protocol
    case SequencedOnNextManifest ⇒ deserializeSequencedOnNext(bytes)
    case CumulativeDemandManifest ⇒ deserializeCumulativeDemand(bytes)
    case RemoteSinkCompletedManifest ⇒ deserializeRemoteSinkCompleted(bytes)
    case RemoteSinkFailureManifest ⇒ deserializeRemoteSinkFailure(bytes)
    // refs
    case SinkRefManifest ⇒ deserializeSinkRef(bytes)
    case SourceRefManifest ⇒ deserializeSourceRef(bytes)
    // fail descriptively instead of with an opaque MatchError
    case unknown ⇒ throw new IllegalArgumentException(
      s"Unknown manifest [$unknown] for serializer [${getClass.getName}]")
  }

  // --- serialization ---

  private def serializeCumulativeDemand(d: StreamRefs.CumulativeDemand): StreamRefContainers.CumulativeDemand = {
    StreamRefContainers.CumulativeDemand.newBuilder()
      .setSeqNr(d.seqNr)
      .build()
  }

  private def serializeRemoteSinkFailure(d: StreamRefs.RemoteSinkFailure): StreamRefContainers.RemoteSinkFailure = {
    StreamRefContainers.RemoteSinkFailure.newBuilder()
      // use UTF-8 explicitly: deserialization reads this back with toStringUtf8, so
      // the platform-default charset of String#getBytes would corrupt non-ASCII messages
      .setCause(ByteString.copyFromUtf8(d.msg))
      .build()
  }

  private def serializeRemoteSinkCompleted(d: StreamRefs.RemoteSinkCompleted): StreamRefContainers.RemoteSinkCompleted = {
    StreamRefContainers.RemoteSinkCompleted.newBuilder()
      .setSeqNr(d.seqNr)
      .build()
  }

  // Wraps the user payload together with its serializer id and (optional) manifest,
  // so the receiving side can delegate deserialization to the right serializer.
  private def serializeSequencedOnNext(o: StreamRefs.SequencedOnNext[_]) = {
    val p = o.payload.asInstanceOf[AnyRef]
    val msgSerializer = serialization.findSerializerFor(p)

    val payloadBuilder = StreamRefContainers.Payload.newBuilder()
      .setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(p)))
      .setSerializerId(msgSerializer.identifier)

    msgSerializer match {
      case ser2: SerializerWithStringManifest ⇒
        val manifest = ser2.manifest(p)
        if (manifest != "")
          payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(manifest))
      case _ ⇒
        // legacy serializers signal the manifest via the payload's class name
        if (msgSerializer.includeManifest)
          payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(p.getClass.getName))
    }

    StreamRefContainers.SequencedOnNext.newBuilder()
      .setSeqNr(o.seqNr)
      .setPayload(payloadBuilder.build())
      .build()
  }

  private def serializeSinkRef(sink: SinkRef[_]): StreamRefContainers.SinkRef = {
    val actorRef = StreamRefContainers.ActorRef.newBuilder()
      .setPath(Serialization.serializedActorPath(sink.targetRef))

    StreamRefContainers.SinkRef.newBuilder()
      .setInitialDemand(sink.initialDemand)
      .setTargetRef(actorRef)
      .build()
  }

  private def serializeSourceRef(source: SourceRef[_]): StreamRefContainers.SourceRef = {
    val actorRef = StreamRefContainers.ActorRef.newBuilder()
      .setPath(Serialization.serializedActorPath(source.originRef))

    StreamRefContainers.SourceRef.newBuilder()
      .setOriginRef(actorRef)
      .build()
  }

  // --- deserialization ---

  private def deserializeSinkRef(bytes: Array[Byte]): SinkRef[Any] = {
    val ref = StreamRefContainers.SinkRef.parseFrom(bytes)
    val targetRef = serialization.system.provider.resolveActorRef(ref.getTargetRef.getPath)

    new SinkRef[Any](targetRef, ref.getInitialDemand)
  }

  private def deserializeSourceRef(bytes: Array[Byte]): SourceRef[Any] = {
    val ref = StreamRefContainers.SourceRef.parseFrom(bytes)
    val targetRef = serialization.system.provider.resolveActorRef(ref.getOriginRef.getPath)

    new SourceRef[Any](targetRef)
  }

  private def deserializeSequencedOnNext(bytes: Array[Byte]): AnyRef = {
    val o = StreamRefContainers.SequencedOnNext.parseFrom(bytes)
    val p = o.getPayload
    // delegate to the serializer recorded alongside the payload;
    // .get propagates any deserialization failure to the caller
    val payload = serialization.deserialize(
      p.getEnclosedMessage.toByteArray,
      p.getSerializerId,
      p.getMessageManifest.toStringUtf8
    )
    StreamRefs.SequencedOnNext(o.getSeqNr, payload.get)
  }

  private def deserializeCumulativeDemand(bytes: Array[Byte]): StreamRefs.CumulativeDemand = {
    val d = StreamRefContainers.CumulativeDemand.parseFrom(bytes)
    StreamRefs.CumulativeDemand(d.getSeqNr)
  }

  private def deserializeRemoteSinkCompleted(bytes: Array[Byte]): StreamRefs.RemoteSinkCompleted = {
    val d = StreamRefContainers.RemoteSinkCompleted.parseFrom(bytes)
    StreamRefs.RemoteSinkCompleted(d.getSeqNr)
  }

  private def deserializeRemoteSinkFailure(bytes: Array[Byte]): AnyRef = {
    val d = StreamRefContainers.RemoteSinkFailure.parseFrom(bytes)
    StreamRefs.RemoteSinkFailure(d.getCause.toStringUtf8)
  }

}
|
||||||
|
|
@ -177,11 +177,14 @@ object GraphStageLogic {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Minimal actor to work with other actors and watch them in a synchronous ways
|
* Minimal actor to work with other actors and watch them in a synchronous ways
|
||||||
|
*
|
||||||
|
* @param name leave empty to use plain auto generated names
|
||||||
*/
|
*/
|
||||||
final class StageActor(
|
final class StageActor(
|
||||||
materializer: ActorMaterializer,
|
materializer: ActorMaterializer,
|
||||||
getAsyncCallback: StageActorRef.Receive ⇒ AsyncCallback[(ActorRef, Any)],
|
getAsyncCallback: StageActorRef.Receive ⇒ AsyncCallback[(ActorRef, Any)],
|
||||||
initialReceive: StageActorRef.Receive) {
|
initialReceive: StageActorRef.Receive,
|
||||||
|
name: String) {
|
||||||
|
|
||||||
private val callback = getAsyncCallback(internalReceive)
|
private val callback = getAsyncCallback(internalReceive)
|
||||||
private def cell = materializer.supervisor match {
|
private def cell = materializer.supervisor match {
|
||||||
|
|
@ -191,14 +194,13 @@ object GraphStageLogic {
|
||||||
throw new IllegalStateException(s"Stream supervisor must be a local actor, was [${unknown.getClass.getName}]")
|
throw new IllegalStateException(s"Stream supervisor must be a local actor, was [${unknown.getClass.getName}]")
|
||||||
}
|
}
|
||||||
|
|
||||||
private val functionRef: FunctionRef = {
|
private val functionRef: FunctionRef =
|
||||||
cell.addFunctionRef {
|
cell.addFunctionRef({
|
||||||
case (_, m @ (PoisonPill | Kill)) ⇒
|
case (_, m @ (PoisonPill | Kill)) ⇒
|
||||||
materializer.logger.warning("{} message sent to StageActor({}) will be ignored, since it is not a real Actor." +
|
materializer.logger.warning("{} message sent to StageActor({}) will be ignored, since it is not a real Actor." +
|
||||||
"Use a custom message type to communicate with it instead.", m, functionRef.path)
|
"Use a custom message type to communicate with it instead.", m, functionRef.path)
|
||||||
case pair ⇒ callback.invoke(pair)
|
case pair ⇒ callback.invoke(pair)
|
||||||
}
|
}, name)
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The ActorRef by which this StageActor can be contacted from the outside.
|
* The ActorRef by which this StageActor can be contacted from the outside.
|
||||||
|
|
@ -1162,12 +1164,38 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
*/
|
*/
|
||||||
// FIXME: I don't like the Pair allocation :(
|
// FIXME: I don't like the Pair allocation :(
|
||||||
@ApiMayChange
|
@ApiMayChange
|
||||||
final protected def getStageActor(receive: ((ActorRef, Any)) ⇒ Unit): StageActor = {
|
final protected def getStageActor(receive: ((ActorRef, Any)) ⇒ Unit): StageActor =
|
||||||
|
getStageActor(receive, name = "")
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize a named [[StageActorRef]] which can be used to interact with from the outside world "as-if" an [[Actor]].
|
||||||
|
* The messages are looped through the [[getAsyncCallback]] mechanism of [[GraphStage]] so they are safe to modify
|
||||||
|
* internal state of this stage.
|
||||||
|
*
|
||||||
|
* This method must (the earliest) be called after the [[GraphStageLogic]] constructor has finished running,
|
||||||
|
* for example from the [[preStart]] callback the graph stage logic provides.
|
||||||
|
*
|
||||||
|
* Created [[StageActorRef]] to get messages and watch other actors in synchronous way.
|
||||||
|
*
|
||||||
|
* The [[StageActorRef]]'s lifecycle is bound to the Stage, in other words when the Stage is finished,
|
||||||
|
* the Actor will be terminated as well. The entity backing the [[StageActorRef]] is not a real Actor,
|
||||||
|
* but the [[GraphStageLogic]] itself, therefore it does not react to [[PoisonPill]].
|
||||||
|
*
|
||||||
|
* @param receive callback that will be called upon receiving of a message by this special Actor
|
||||||
|
* @param name to be used in the Actor's name. An empty String will cause the usual auto generated name to be used
|
||||||
|
* @return minimal actor with watch method
|
||||||
|
*/
|
||||||
|
@ApiMayChange
|
||||||
|
final protected def getStageActor(receive: ((ActorRef, Any)) ⇒ Unit, name: String): StageActor = {
|
||||||
_stageActor match {
|
_stageActor match {
|
||||||
case null ⇒
|
case null ⇒
|
||||||
val actorMaterializer = ActorMaterializerHelper.downcast(interpreter.materializer)
|
val actorMaterializer = ActorMaterializerHelper.downcast(interpreter.materializer)
|
||||||
_stageActor = new StageActor(actorMaterializer, getAsyncCallback, receive)
|
_stageActor = new StageActor(actorMaterializer, getAsyncCallback, receive, name)
|
||||||
_stageActor
|
_stageActor
|
||||||
|
case existing if name != "" && existing.ref.path.name != name ⇒
|
||||||
|
throw new IllegalArgumentException(s"Illegal name argument ($name) in getStageActor! " +
|
||||||
|
s"It is not legal to change the name of the allocated stage actor. " +
|
||||||
|
s"The existing Actor is named [${existing.ref.path.name}], and must remain such throughout all getStageActor calls.")
|
||||||
case existing ⇒
|
case existing ⇒
|
||||||
existing.become(receive)
|
existing.become(receive)
|
||||||
existing
|
existing
|
||||||
|
|
|
||||||
|
|
@ -323,10 +323,11 @@ lazy val slf4j = akkaModule("akka-slf4j")
|
||||||
.settings(OSGi.slf4j)
|
.settings(OSGi.slf4j)
|
||||||
|
|
||||||
lazy val stream = akkaModule("akka-stream")
|
lazy val stream = akkaModule("akka-stream")
|
||||||
.dependsOn(actor)
|
.dependsOn(actor, protobuf)
|
||||||
.settings(Dependencies.stream)
|
.settings(Dependencies.stream)
|
||||||
.settings(AutomaticModuleName.settings("akka.stream"))
|
.settings(AutomaticModuleName.settings("akka.stream"))
|
||||||
.settings(OSGi.stream)
|
.settings(OSGi.stream)
|
||||||
|
.settings(Protobuf.settings)
|
||||||
.enablePlugins(BoilerplatePlugin)
|
.enablePlugins(BoilerplatePlugin)
|
||||||
|
|
||||||
lazy val streamTestkit = akkaModule("akka-stream-testkit")
|
lazy val streamTestkit = akkaModule("akka-stream-testkit")
|
||||||
|
|
@ -337,7 +338,7 @@ lazy val streamTestkit = akkaModule("akka-stream-testkit")
|
||||||
.disablePlugins(MimaPlugin)
|
.disablePlugins(MimaPlugin)
|
||||||
|
|
||||||
lazy val streamTests = akkaModule("akka-stream-tests")
|
lazy val streamTests = akkaModule("akka-stream-tests")
|
||||||
.dependsOn(streamTestkit % "test->test", stream)
|
.dependsOn(streamTestkit % "test->test", remote % "test->test", stream)
|
||||||
.settings(Dependencies.streamTests)
|
.settings(Dependencies.streamTests)
|
||||||
.enablePlugins(NoPublish)
|
.enablePlugins(NoPublish)
|
||||||
.disablePlugins(MimaPlugin, WhiteSourcePlugin)
|
.disablePlugins(MimaPlugin, WhiteSourcePlugin)
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue