entries_ = new java.util.ArrayList<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry>();
- mutable_bitField0_ |= 0x00000001;
+ mutable_bitField0_ |= 0x00000002;
}
entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.PARSER, extensionRegistry));
break;
@@ -12571,7 +12802,7 @@ public final class ReplicatorMessages {
throw new akka.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
- if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
entries_ = java.util.Collections.unmodifiableList(entries_);
}
this.unknownFields = unknownFields.build();
@@ -12636,6 +12867,34 @@ public final class ReplicatorMessages {
* required .akka.cluster.ddata.DataEnvelope envelope = 2;
*/
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder();
+
+ // required int64 fromSeqNr = 3;
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ boolean hasFromSeqNr();
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ long getFromSeqNr();
+
+ // optional int64 toSeqNr = 4;
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ boolean hasToSeqNr();
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ long getToSeqNr();
}
/**
* Protobuf type {@code akka.cluster.ddata.DeltaPropagation.Entry}
@@ -12706,6 +12965,16 @@ public final class ReplicatorMessages {
bitField0_ |= 0x00000002;
break;
}
+ case 24: {
+ bitField0_ |= 0x00000004;
+ fromSeqNr_ = input.readInt64();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ toSeqNr_ = input.readInt64();
+ break;
+ }
}
}
} catch (akka.protobuf.InvalidProtocolBufferException e) {
@@ -12811,9 +13080,51 @@ public final class ReplicatorMessages {
return envelope_;
}
+ // required int64 fromSeqNr = 3;
+ public static final int FROMSEQNR_FIELD_NUMBER = 3;
+ private long fromSeqNr_;
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ public boolean hasFromSeqNr() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ public long getFromSeqNr() {
+ return fromSeqNr_;
+ }
+
+ // optional int64 toSeqNr = 4;
+ public static final int TOSEQNR_FIELD_NUMBER = 4;
+ private long toSeqNr_;
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ public boolean hasToSeqNr() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ public long getToSeqNr() {
+ return toSeqNr_;
+ }
+
private void initFields() {
key_ = "";
envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance();
+ fromSeqNr_ = 0L;
+ toSeqNr_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -12828,6 +13139,10 @@ public final class ReplicatorMessages {
memoizedIsInitialized = 0;
return false;
}
+ if (!hasFromSeqNr()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
if (!getEnvelope().isInitialized()) {
memoizedIsInitialized = 0;
return false;
@@ -12845,6 +13160,12 @@ public final class ReplicatorMessages {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, envelope_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt64(3, fromSeqNr_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt64(4, toSeqNr_);
+ }
getUnknownFields().writeTo(output);
}
@@ -12862,6 +13183,14 @@ public final class ReplicatorMessages {
size += akka.protobuf.CodedOutputStream
.computeMessageSize(2, envelope_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += akka.protobuf.CodedOutputStream
+ .computeInt64Size(3, fromSeqNr_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += akka.protobuf.CodedOutputStream
+ .computeInt64Size(4, toSeqNr_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -12987,6 +13316,10 @@ public final class ReplicatorMessages {
envelopeBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
+ fromSeqNr_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ toSeqNr_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@@ -13027,6 +13360,14 @@ public final class ReplicatorMessages {
} else {
result.envelope_ = envelopeBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.fromSeqNr_ = fromSeqNr_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.toSeqNr_ = toSeqNr_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -13051,6 +13392,12 @@ public final class ReplicatorMessages {
if (other.hasEnvelope()) {
mergeEnvelope(other.getEnvelope());
}
+ if (other.hasFromSeqNr()) {
+ setFromSeqNr(other.getFromSeqNr());
+ }
+ if (other.hasToSeqNr()) {
+ setToSeqNr(other.getToSeqNr());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -13064,6 +13411,10 @@ public final class ReplicatorMessages {
return false;
}
+ if (!hasFromSeqNr()) {
+
+ return false;
+ }
if (!getEnvelope().isInitialized()) {
return false;
@@ -13281,6 +13632,88 @@ public final class ReplicatorMessages {
return envelopeBuilder_;
}
+ // required int64 fromSeqNr = 3;
+ private long fromSeqNr_ ;
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ public boolean hasFromSeqNr() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ public long getFromSeqNr() {
+ return fromSeqNr_;
+ }
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ public Builder setFromSeqNr(long value) {
+ bitField0_ |= 0x00000004;
+ fromSeqNr_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 fromSeqNr = 3;
+ */
+ public Builder clearFromSeqNr() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ fromSeqNr_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 toSeqNr = 4;
+ private long toSeqNr_ ;
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ public boolean hasToSeqNr() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ public long getToSeqNr() {
+ return toSeqNr_;
+ }
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ public Builder setToSeqNr(long value) {
+ bitField0_ |= 0x00000008;
+ toSeqNr_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 toSeqNr = 4;
+ *
+ *
+ * if not set then same as fromSequenceNr
+ *
+ */
+ public Builder clearToSeqNr() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ toSeqNr_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:akka.cluster.ddata.DeltaPropagation.Entry)
}
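A minimal usage sketch of the new per-entry sequence-number fields added above. It assumes the usual generated newBuilder()/setKey()/setEnvelope() accessors for fields 1 and 2, which fall outside this excerpt; only setFromSeqNr/setToSeqNr appear in the diff itself.

  // Hypothetical sketch; setKey/setEnvelope are assumed generated accessors not shown in this diff.
  ReplicatorMessages.DeltaPropagation.Entry entry =
      ReplicatorMessages.DeltaPropagation.Entry.newBuilder()
          .setKey("counter-A")       // field 1
          .setEnvelope(envelope)     // field 2, a previously built DataEnvelope
          .setFromSeqNr(42L)         // new required field 3
          .setToSeqNr(45L)           // new optional field 4; readers treat an unset value as fromSeqNr
          .build();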
@@ -13292,36 +13725,59 @@ public final class ReplicatorMessages {
// @@protoc_insertion_point(class_scope:akka.cluster.ddata.DeltaPropagation.Entry)
}
- // repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
- public static final int ENTRIES_FIELD_NUMBER = 1;
+ private int bitField0_;
+ // required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ public static final int FROMNODE_FIELD_NUMBER = 1;
+ private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress fromNode_;
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public boolean hasFromNode() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getFromNode() {
+ return fromNode_;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getFromNodeOrBuilder() {
+ return fromNode_;
+ }
+
+ // repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
+ public static final int ENTRIES_FIELD_NUMBER = 2;
private java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry> entries_;
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry> getEntriesList() {
return entries_;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public java.util.List<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.EntryOrBuilder>
getEntriesOrBuilderList() {
return entries_;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public int getEntriesCount() {
return entries_.size();
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry getEntries(int index) {
return entries_.get(index);
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.EntryOrBuilder getEntriesOrBuilder(
int index) {
@@ -13329,6 +13785,7 @@ public final class ReplicatorMessages {
}
private void initFields() {
+ fromNode_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
entries_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
@@ -13336,6 +13793,14 @@ public final class ReplicatorMessages {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ if (!hasFromNode()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getFromNode().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
for (int i = 0; i < getEntriesCount(); i++) {
if (!getEntries(i).isInitialized()) {
memoizedIsInitialized = 0;
@@ -13349,8 +13814,11 @@ public final class ReplicatorMessages {
public void writeTo(akka.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, fromNode_);
+ }
for (int i = 0; i < entries_.size(); i++) {
- output.writeMessage(1, entries_.get(i));
+ output.writeMessage(2, entries_.get(i));
}
getUnknownFields().writeTo(output);
}
@@ -13361,9 +13829,13 @@ public final class ReplicatorMessages {
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += akka.protobuf.CodedOutputStream
+ .computeMessageSize(1, fromNode_);
+ }
for (int i = 0; i < entries_.size(); i++) {
size += akka.protobuf.CodedOutputStream
- .computeMessageSize(1, entries_.get(i));
+ .computeMessageSize(2, entries_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -13473,6 +13945,7 @@ public final class ReplicatorMessages {
}
private void maybeForceBuilderInitialization() {
if (akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getFromNodeFieldBuilder();
getEntriesFieldBuilder();
}
}
@@ -13482,9 +13955,15 @@ public final class ReplicatorMessages {
public Builder clear() {
super.clear();
+ if (fromNodeBuilder_ == null) {
+ fromNode_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ } else {
+ fromNodeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
if (entriesBuilder_ == null) {
entries_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000002);
} else {
entriesBuilder_.clear();
}
@@ -13515,15 +13994,25 @@ public final class ReplicatorMessages {
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation buildPartial() {
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation(this);
int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (fromNodeBuilder_ == null) {
+ result.fromNode_ = fromNode_;
+ } else {
+ result.fromNode_ = fromNodeBuilder_.build();
+ }
if (entriesBuilder_ == null) {
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
entries_ = java.util.Collections.unmodifiableList(entries_);
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000002);
}
result.entries_ = entries_;
} else {
result.entries_ = entriesBuilder_.build();
}
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -13539,11 +14028,14 @@ public final class ReplicatorMessages {
public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation other) {
if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.getDefaultInstance()) return this;
+ if (other.hasFromNode()) {
+ mergeFromNode(other.getFromNode());
+ }
if (entriesBuilder_ == null) {
if (!other.entries_.isEmpty()) {
if (entries_.isEmpty()) {
entries_ = other.entries_;
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureEntriesIsMutable();
entries_.addAll(other.entries_);
@@ -13556,7 +14048,7 @@ public final class ReplicatorMessages {
entriesBuilder_.dispose();
entriesBuilder_ = null;
entries_ = other.entries_;
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000002);
entriesBuilder_ =
akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getEntriesFieldBuilder() : null;
@@ -13570,6 +14062,14 @@ public final class ReplicatorMessages {
}
public final boolean isInitialized() {
+ if (!hasFromNode()) {
+
+ return false;
+ }
+ if (!getFromNode().isInitialized()) {
+
+ return false;
+ }
for (int i = 0; i < getEntriesCount(); i++) {
if (!getEntries(i).isInitialized()) {
@@ -13598,13 +14098,130 @@ public final class ReplicatorMessages {
}
private int bitField0_;
- // repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ // required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress fromNode_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ private akka.protobuf.SingleFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> fromNodeBuilder_;
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public boolean hasFromNode() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getFromNode() {
+ if (fromNodeBuilder_ == null) {
+ return fromNode_;
+ } else {
+ return fromNodeBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public Builder setFromNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) {
+ if (fromNodeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ fromNode_ = value;
+ onChanged();
+ } else {
+ fromNodeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public Builder setFromNode(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) {
+ if (fromNodeBuilder_ == null) {
+ fromNode_ = builderForValue.build();
+ onChanged();
+ } else {
+ fromNodeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public Builder mergeFromNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) {
+ if (fromNodeBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ fromNode_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) {
+ fromNode_ =
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(fromNode_).mergeFrom(value).buildPartial();
+ } else {
+ fromNode_ = value;
+ }
+ onChanged();
+ } else {
+ fromNodeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public Builder clearFromNode() {
+ if (fromNodeBuilder_ == null) {
+ fromNode_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ onChanged();
+ } else {
+ fromNodeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getFromNodeBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getFromNodeFieldBuilder().getBuilder();
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getFromNodeOrBuilder() {
+ if (fromNodeBuilder_ != null) {
+ return fromNodeBuilder_.getMessageOrBuilder();
+ } else {
+ return fromNode_;
+ }
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress fromNode = 1;
+ */
+ private akka.protobuf.SingleFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>
+ getFromNodeFieldBuilder() {
+ if (fromNodeBuilder_ == null) {
+ fromNodeBuilder_ = new akka.protobuf.SingleFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>(
+ fromNode_,
+ getParentForChildren(),
+ isClean());
+ fromNode_ = null;
+ }
+ return fromNodeBuilder_;
+ }
+
+ // repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
private java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry> entries_ =
java.util.Collections.emptyList();
private void ensureEntriesIsMutable() {
- if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
entries_ = new java.util.ArrayList<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry>(entries_);
- bitField0_ |= 0x00000001;
+ bitField0_ |= 0x00000002;
}
}
@@ -13612,7 +14229,7 @@ public final class ReplicatorMessages {
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.EntryOrBuilder> entriesBuilder_;
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry> getEntriesList() {
if (entriesBuilder_ == null) {
@@ -13622,7 +14239,7 @@ public final class ReplicatorMessages {
}
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public int getEntriesCount() {
if (entriesBuilder_ == null) {
@@ -13632,7 +14249,7 @@ public final class ReplicatorMessages {
}
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry getEntries(int index) {
if (entriesBuilder_ == null) {
@@ -13642,7 +14259,7 @@ public final class ReplicatorMessages {
}
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder setEntries(
int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry value) {
@@ -13659,7 +14276,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder setEntries(
int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder builderForValue) {
@@ -13673,7 +14290,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry value) {
if (entriesBuilder_ == null) {
@@ -13689,7 +14306,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder addEntries(
int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry value) {
@@ -13706,7 +14323,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder addEntries(
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder builderForValue) {
@@ -13720,7 +14337,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder addEntries(
int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder builderForValue) {
@@ -13734,7 +14351,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder addAllEntries(
java.lang.Iterable<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry> values) {
@@ -13748,12 +14365,12 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder clearEntries() {
if (entriesBuilder_ == null) {
entries_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
entriesBuilder_.clear();
@@ -13761,7 +14378,7 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public Builder removeEntries(int index) {
if (entriesBuilder_ == null) {
@@ -13774,14 +14391,14 @@ public final class ReplicatorMessages {
return this;
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder getEntriesBuilder(
int index) {
return getEntriesFieldBuilder().getBuilder(index);
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.EntryOrBuilder getEntriesOrBuilder(
int index) {
@@ -13791,7 +14408,7 @@ public final class ReplicatorMessages {
}
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public java.util.List<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.EntryOrBuilder>
getEntriesOrBuilderList() {
@@ -13802,14 +14419,14 @@ public final class ReplicatorMessages {
}
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder addEntriesBuilder() {
return getEntriesFieldBuilder().addBuilder(
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.getDefaultInstance());
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder addEntriesBuilder(
int index) {
@@ -13817,7 +14434,7 @@ public final class ReplicatorMessages {
index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.getDefaultInstance());
}
/**
- * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 1;
+ * repeated .akka.cluster.ddata.DeltaPropagation.Entry entries = 2;
*/
public java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder>
getEntriesBuilderList() {
@@ -13830,7 +14447,7 @@ public final class ReplicatorMessages {
entriesBuilder_ = new akka.protobuf.RepeatedFieldBuilder<
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DeltaPropagation.EntryOrBuilder>(
entries_,
- ((bitField0_ & 0x00000001) == 0x00000001),
+ ((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
entries_ = null;
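A minimal sketch of assembling the reworked outer message, assuming the usual generated newBuilder() factory (not shown in this excerpt); setFromNode and addEntries are the accessors introduced and renumbered above, and uniqueAddress/entry are placeholder values.

  ReplicatorMessages.DeltaPropagation propagation =
      ReplicatorMessages.DeltaPropagation.newBuilder()
          .setFromNode(uniqueAddress)  // new required field 1
          .addEntries(entry)           // entries renumbered from field 1 to field 2
          .build();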
@@ -15142,6 +15759,1304 @@ public final class ReplicatorMessages {
// @@protoc_insertion_point(class_scope:akka.cluster.ddata.Address)
}
+ public interface VersionVectorOrBuilder
+ extends akka.protobuf.MessageOrBuilder {
+
+ // repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry>
+ getEntriesList();
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry getEntries(int index);
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ int getEntriesCount();
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ java.util.List<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder>
+ getEntriesOrBuilderList();
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder getEntriesOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code akka.cluster.ddata.VersionVector}
+ */
+ public static final class VersionVector extends
+ akka.protobuf.GeneratedMessage
+ implements VersionVectorOrBuilder {
+ // Use VersionVector.newBuilder() to construct.
+ private VersionVector(akka.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private VersionVector(boolean noInit) { this.unknownFields = akka.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final VersionVector defaultInstance;
+ public static VersionVector getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public VersionVector getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final akka.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final akka.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private VersionVector(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ akka.protobuf.UnknownFieldSet.Builder unknownFields =
+ akka.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ entries_ = new java.util.ArrayList<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (akka.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new akka.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ entries_ = java.util.Collections.unmodifiableList(entries_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final akka.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_descriptor;
+ }
+
+ protected akka.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Builder.class);
+ }
+
+ public static akka.protobuf.Parser<VersionVector> PARSER =
+ new akka.protobuf.AbstractParser<VersionVector>() {
+ public VersionVector parsePartialFrom(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return new VersionVector(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public akka.protobuf.Parser<VersionVector> getParserForType() {
+ return PARSER;
+ }
+
+ public interface EntryOrBuilder
+ extends akka.protobuf.MessageOrBuilder {
+
+ // required .akka.cluster.ddata.UniqueAddress node = 1;
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ boolean hasNode();
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode();
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder();
+
+ // required int64 version = 2;
+ /**
+ * required int64 version = 2;
+ */
+ boolean hasVersion();
+ /**
+ * required int64 version = 2;
+ */
+ long getVersion();
+ }
+ /**
+ * Protobuf type {@code akka.cluster.ddata.VersionVector.Entry}
+ */
+ public static final class Entry extends
+ akka.protobuf.GeneratedMessage
+ implements EntryOrBuilder {
+ // Use Entry.newBuilder() to construct.
+ private Entry(akka.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Entry(boolean noInit) { this.unknownFields = akka.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Entry defaultInstance;
+ public static Entry getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Entry getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final akka.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final akka.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Entry(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ akka.protobuf.UnknownFieldSet.Builder unknownFields =
+ akka.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = node_.toBuilder();
+ }
+ node_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(node_);
+ node_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ version_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (akka.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new akka.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final akka.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor;
+ }
+
+ protected akka.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder.class);
+ }
+
+ public static akka.protobuf.Parser<Entry> PARSER =
+ new akka.protobuf.AbstractParser<Entry>() {
+ public Entry parsePartialFrom(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return new Entry(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public akka.protobuf.Parser<Entry> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .akka.cluster.ddata.UniqueAddress node = 1;
+ public static final int NODE_FIELD_NUMBER = 1;
+ private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_;
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public boolean hasNode() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() {
+ return node_;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() {
+ return node_;
+ }
+
+ // required int64 version = 2;
+ public static final int VERSION_FIELD_NUMBER = 2;
+ private long version_;
+ /**
+ * required int64 version = 2;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required int64 version = 2;
+ */
+ public long getVersion() {
+ return version_;
+ }
+
+ private void initFields() {
+ node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ version_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNode()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasVersion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getNode().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(akka.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, node_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt64(2, version_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += akka.protobuf.CodedOutputStream
+ .computeMessageSize(1, node_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += akka.protobuf.CodedOutputStream
+ .computeInt64Size(2, version_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(
+ akka.protobuf.ByteString data)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(
+ akka.protobuf.ByteString data,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(byte[] data)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(
+ byte[] data,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(
+ java.io.InputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseDelimitedFrom(
+ java.io.InputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(
+ akka.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parseFrom(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ akka.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code akka.cluster.ddata.VersionVector.Entry}
+ */
+ public static final class Builder extends
+ akka.protobuf.GeneratedMessage.Builder<Builder>
+ implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder {
+ public static final akka.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor;
+ }
+
+ protected akka.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder.class);
+ }
+
+ // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ akka.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getNodeFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (nodeBuilder_ == null) {
+ node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ } else {
+ nodeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public akka.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor;
+ }
+
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry getDefaultInstanceForType() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.getDefaultInstance();
+ }
+
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry build() {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry buildPartial() {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (nodeBuilder_ == null) {
+ result.node_ = node_;
+ } else {
+ result.node_ = nodeBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.version_ = version_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(akka.protobuf.Message other) {
+ if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry) {
+ return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry other) {
+ if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.getDefaultInstance()) return this;
+ if (other.hasNode()) {
+ mergeNode(other.getNode());
+ }
+ if (other.hasVersion()) {
+ setVersion(other.getVersion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNode()) {
+
+ return false;
+ }
+ if (!hasVersion()) {
+
+ return false;
+ }
+ if (!getNode().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (akka.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .akka.cluster.ddata.UniqueAddress node = 1;
+ private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ private akka.protobuf.SingleFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> nodeBuilder_;
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public boolean hasNode() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() {
+ if (nodeBuilder_ == null) {
+ return node_;
+ } else {
+ return nodeBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public Builder setNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) {
+ if (nodeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ node_ = value;
+ onChanged();
+ } else {
+ nodeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public Builder setNode(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) {
+ if (nodeBuilder_ == null) {
+ node_ = builderForValue.build();
+ onChanged();
+ } else {
+ nodeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public Builder mergeNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) {
+ if (nodeBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ node_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) {
+ node_ =
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(node_).mergeFrom(value).buildPartial();
+ } else {
+ node_ = value;
+ }
+ onChanged();
+ } else {
+ nodeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public Builder clearNode() {
+ if (nodeBuilder_ == null) {
+ node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance();
+ onChanged();
+ } else {
+ nodeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getNodeBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getNodeFieldBuilder().getBuilder();
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() {
+ if (nodeBuilder_ != null) {
+ return nodeBuilder_.getMessageOrBuilder();
+ } else {
+ return node_;
+ }
+ }
+ /**
+ * required .akka.cluster.ddata.UniqueAddress node = 1;
+ */
+ private akka.protobuf.SingleFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>
+ getNodeFieldBuilder() {
+ if (nodeBuilder_ == null) {
+ nodeBuilder_ = new akka.protobuf.SingleFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>(
+ node_,
+ getParentForChildren(),
+ isClean());
+ node_ = null;
+ }
+ return nodeBuilder_;
+ }
+
+ // required int64 version = 2;
+ private long version_ ;
+ /**
+ * required int64 version = 2;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required int64 version = 2;
+ */
+ public long getVersion() {
+ return version_;
+ }
+ /**
+ * required int64 version = 2;
+ */
+ public Builder setVersion(long value) {
+ bitField0_ |= 0x00000002;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 version = 2;
+ */
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ version_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.VersionVector.Entry)
+ }
+
+ static {
+ defaultInstance = new Entry(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:akka.cluster.ddata.VersionVector.Entry)
+ }
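A minimal sketch of populating the new VersionVector message, assuming the usual generated newBuilder() and addEntries(Entry.Builder) accessors (the repeated-field builder methods are cut off at the end of this excerpt); setNode/setVersion appear above and uniqueAddress is a placeholder UniqueAddress.

  ReplicatorMessages.VersionVector vv =
      ReplicatorMessages.VersionVector.newBuilder()
          .addEntries(ReplicatorMessages.VersionVector.Entry.newBuilder()
              .setNode(uniqueAddress)  // required field 1
              .setVersion(7L))         // required field 2
          .build();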
+
+ // repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ public static final int ENTRIES_FIELD_NUMBER = 1;
+ private java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry> entries_;
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry> getEntriesList() {
+ return entries_;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public java.util.List<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder>
+ getEntriesOrBuilderList() {
+ return entries_;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public int getEntriesCount() {
+ return entries_.size();
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry getEntries(int index) {
+ return entries_.get(index);
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder getEntriesOrBuilder(
+ int index) {
+ return entries_.get(index);
+ }
+
+ private void initFields() {
+ entries_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getEntriesCount(); i++) {
+ if (!getEntries(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(akka.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < entries_.size(); i++) {
+ output.writeMessage(1, entries_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < entries_.size(); i++) {
+ size += akka.protobuf.CodedOutputStream
+ .computeMessageSize(1, entries_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(
+ akka.protobuf.ByteString data)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(
+ akka.protobuf.ByteString data,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(byte[] data)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(
+ byte[] data,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws akka.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(
+ java.io.InputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseDelimitedFrom(
+ java.io.InputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(
+ akka.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parseFrom(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ akka.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code akka.cluster.ddata.VersionVector}
+ */
+ public static final class Builder extends
+ akka.protobuf.GeneratedMessage.Builder<Builder>
+ implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVectorOrBuilder {
+ public static final akka.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_descriptor;
+ }
+
+ protected akka.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Builder.class);
+ }
+
+ // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ akka.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getEntriesFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (entriesBuilder_ == null) {
+ entries_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ entriesBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public akka.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_VersionVector_descriptor;
+ }
+
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector getDefaultInstanceForType() {
+ return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.getDefaultInstance();
+ }
+
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector build() {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector buildPartial() {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector(this);
+ int from_bitField0_ = bitField0_;
+ if (entriesBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ entries_ = java.util.Collections.unmodifiableList(entries_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.entries_ = entries_;
+ } else {
+ result.entries_ = entriesBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(akka.protobuf.Message other) {
+ if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector) {
+ return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector other) {
+ if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.getDefaultInstance()) return this;
+ if (entriesBuilder_ == null) {
+ if (!other.entries_.isEmpty()) {
+ if (entries_.isEmpty()) {
+ entries_ = other.entries_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureEntriesIsMutable();
+ entries_.addAll(other.entries_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.entries_.isEmpty()) {
+ if (entriesBuilder_.isEmpty()) {
+ entriesBuilder_.dispose();
+ entriesBuilder_ = null;
+ entries_ = other.entries_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ entriesBuilder_ =
+ akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getEntriesFieldBuilder() : null;
+ } else {
+ entriesBuilder_.addAllMessages(other.entries_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getEntriesCount(); i++) {
+ if (!getEntries(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ akka.protobuf.CodedInputStream input,
+ akka.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (akka.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ private java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry> entries_ =
+ java.util.Collections.emptyList();
+ private void ensureEntriesIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ entries_ = new java.util.ArrayList<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry>(entries_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private akka.protobuf.RepeatedFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder> entriesBuilder_;
+
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry> getEntriesList() {
+ if (entriesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(entries_);
+ } else {
+ return entriesBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public int getEntriesCount() {
+ if (entriesBuilder_ == null) {
+ return entries_.size();
+ } else {
+ return entriesBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry getEntries(int index) {
+ if (entriesBuilder_ == null) {
+ return entries_.get(index);
+ } else {
+ return entriesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder setEntries(
+ int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry value) {
+ if (entriesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureEntriesIsMutable();
+ entries_.set(index, value);
+ onChanged();
+ } else {
+ entriesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder setEntries(
+ int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder builderForValue) {
+ if (entriesBuilder_ == null) {
+ ensureEntriesIsMutable();
+ entries_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ entriesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry value) {
+ if (entriesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureEntriesIsMutable();
+ entries_.add(value);
+ onChanged();
+ } else {
+ entriesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder addEntries(
+ int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry value) {
+ if (entriesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureEntriesIsMutable();
+ entries_.add(index, value);
+ onChanged();
+ } else {
+ entriesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder addEntries(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder builderForValue) {
+ if (entriesBuilder_ == null) {
+ ensureEntriesIsMutable();
+ entries_.add(builderForValue.build());
+ onChanged();
+ } else {
+ entriesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder addEntries(
+ int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder builderForValue) {
+ if (entriesBuilder_ == null) {
+ ensureEntriesIsMutable();
+ entries_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ entriesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder addAllEntries(
+ java.lang.Iterable<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry> values) {
+ if (entriesBuilder_ == null) {
+ ensureEntriesIsMutable();
+ super.addAll(values, entries_);
+ onChanged();
+ } else {
+ entriesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder clearEntries() {
+ if (entriesBuilder_ == null) {
+ entries_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ entriesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public Builder removeEntries(int index) {
+ if (entriesBuilder_ == null) {
+ ensureEntriesIsMutable();
+ entries_.remove(index);
+ onChanged();
+ } else {
+ entriesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder getEntriesBuilder(
+ int index) {
+ return getEntriesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder getEntriesOrBuilder(
+ int index) {
+ if (entriesBuilder_ == null) {
+ return entries_.get(index); } else {
+ return entriesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public java.util.List<? extends akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder>
+ getEntriesOrBuilderList() {
+ if (entriesBuilder_ != null) {
+ return entriesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(entries_);
+ }
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder addEntriesBuilder() {
+ return getEntriesFieldBuilder().addBuilder(
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.getDefaultInstance());
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder addEntriesBuilder(
+ int index) {
+ return getEntriesFieldBuilder().addBuilder(
+ index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.getDefaultInstance());
+ }
+ /**
+ * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1;
+ */
+ public java.util.List<akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder>
+ getEntriesBuilderList() {
+ return getEntriesFieldBuilder().getBuilderList();
+ }
+ private akka.protobuf.RepeatedFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder>
+ getEntriesFieldBuilder() {
+ if (entriesBuilder_ == null) {
+ entriesBuilder_ = new akka.protobuf.RepeatedFieldBuilder<
+ akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.VersionVector.EntryOrBuilder>(
+ entries_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ entries_ = null;
+ }
+ return entriesBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.VersionVector)
+ }
+
+ static {
+ defaultInstance = new VersionVector(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:akka.cluster.ddata.VersionVector)
+ }
+
public interface OtherMessageOrBuilder
extends akka.protobuf.MessageOrBuilder {
@@ -17221,6 +19136,16 @@ public final class ReplicatorMessages {
private static
akka.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_akka_cluster_ddata_Address_fieldAccessorTable;
+ private static akka.protobuf.Descriptors.Descriptor
+ internal_static_akka_cluster_ddata_VersionVector_descriptor;
+ private static
+ akka.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable;
+ private static akka.protobuf.Descriptors.Descriptor
+ internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor;
+ private static
+ akka.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable;
private static akka.protobuf.Descriptors.Descriptor
internal_static_akka_cluster_ddata_OtherMessage_descriptor;
private static
@@ -17269,38 +19194,45 @@ public final class ReplicatorMessages {
"key\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .akka.clust" +
"er.ddata.DataEnvelope\"\007\n\005Empty\"\023\n\004Read\022\013" +
"\n\003key\030\001 \002(\t\"@\n\nReadResult\0222\n\010envelope\030\001 " +
- "\001(\0132 .akka.cluster.ddata.DataEnvelope\"\327\002" +
+ "\001(\0132 .akka.cluster.ddata.DataEnvelope\"\221\003" +
"\n\014DataEnvelope\022.\n\004data\030\001 \002(\0132 .akka.clus" +
"ter.ddata.OtherMessage\022>\n\007pruning\030\002 \003(\0132" +
"-.akka.cluster.ddata.DataEnvelope.Prunin" +
- "gEntry\032\326\001\n\014PruningEntry\0229\n\016removedAddres" +
- "s\030\001 \002(\0132!.akka.cluster.ddata.UniqueAddre",
- "ss\0227\n\014ownerAddress\030\002 \002(\0132!.akka.cluster." +
- "ddata.UniqueAddress\022\021\n\tperformed\030\003 \002(\010\022)" +
- "\n\004seen\030\004 \003(\0132\033.akka.cluster.ddata.Addres" +
- "s\022\024\n\014obsoleteTime\030\005 \001(\022\"\203\001\n\006Status\022\r\n\005ch" +
- "unk\030\001 \002(\r\022\021\n\ttotChunks\030\002 \002(\r\0221\n\007entries\030" +
- "\003 \003(\0132 .akka.cluster.ddata.Status.Entry\032" +
- "$\n\005Entry\022\013\n\003key\030\001 \002(\t\022\016\n\006digest\030\002 \002(\014\"\227\001" +
- "\n\006Gossip\022\020\n\010sendBack\030\001 \002(\010\0221\n\007entries\030\002 " +
- "\003(\0132 .akka.cluster.ddata.Gossip.Entry\032H\n" +
- "\005Entry\022\013\n\003key\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .",
- "akka.cluster.ddata.DataEnvelope\"\231\001\n\020Delt" +
- "aPropagation\022;\n\007entries\030\001 \003(\0132*.akka.clu" +
- "ster.ddata.DeltaPropagation.Entry\032H\n\005Ent" +
- "ry\022\013\n\003key\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .akka" +
- ".cluster.ddata.DataEnvelope\"X\n\rUniqueAdd" +
- "ress\022,\n\007address\030\001 \002(\0132\033.akka.cluster.dda" +
- "ta.Address\022\013\n\003uid\030\002 \002(\017\022\014\n\004uid2\030\003 \001(\017\")\n" +
- "\007Address\022\020\n\010hostname\030\001 \002(\t\022\014\n\004port\030\002 \002(\r" +
- "\"V\n\014OtherMessage\022\027\n\017enclosedMessage\030\001 \002(" +
- "\014\022\024\n\014serializerId\030\002 \002(\005\022\027\n\017messageManife",
- "st\030\004 \001(\014\"\036\n\nStringGSet\022\020\n\010elements\030\001 \003(\t" +
- "\"\205\001\n\023DurableDataEnvelope\022.\n\004data\030\001 \002(\0132 " +
- ".akka.cluster.ddata.OtherMessage\022>\n\007prun" +
- "ing\030\002 \003(\0132-.akka.cluster.ddata.DataEnvel" +
- "ope.PruningEntryB#\n\037akka.cluster.ddata.p" +
- "rotobuf.msgH\001"
+ "gEntry\0228\n\rdeltaVersions\030\003 \001(\0132!.akka.clu" +
+ "ster.ddata.VersionVector\032\326\001\n\014PruningEntr",
+ "y\0229\n\016removedAddress\030\001 \002(\0132!.akka.cluster" +
+ ".ddata.UniqueAddress\0227\n\014ownerAddress\030\002 \002" +
+ "(\0132!.akka.cluster.ddata.UniqueAddress\022\021\n" +
+ "\tperformed\030\003 \002(\010\022)\n\004seen\030\004 \003(\0132\033.akka.cl" +
+ "uster.ddata.Address\022\024\n\014obsoleteTime\030\005 \001(" +
+ "\022\"\203\001\n\006Status\022\r\n\005chunk\030\001 \002(\r\022\021\n\ttotChunks" +
+ "\030\002 \002(\r\0221\n\007entries\030\003 \003(\0132 .akka.cluster.d" +
+ "data.Status.Entry\032$\n\005Entry\022\013\n\003key\030\001 \002(\t\022" +
+ "\016\n\006digest\030\002 \002(\014\"\227\001\n\006Gossip\022\020\n\010sendBack\030\001" +
+ " \002(\010\0221\n\007entries\030\002 \003(\0132 .akka.cluster.dda",
+ "ta.Gossip.Entry\032H\n\005Entry\022\013\n\003key\030\001 \002(\t\0222\n" +
+ "\010envelope\030\002 \002(\0132 .akka.cluster.ddata.Dat" +
+ "aEnvelope\"\362\001\n\020DeltaPropagation\0223\n\010fromNo" +
+ "de\030\001 \002(\0132!.akka.cluster.ddata.UniqueAddr" +
+ "ess\022;\n\007entries\030\002 \003(\0132*.akka.cluster.ddat" +
+ "a.DeltaPropagation.Entry\032l\n\005Entry\022\013\n\003key" +
+ "\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .akka.cluster." +
+ "ddata.DataEnvelope\022\021\n\tfromSeqNr\030\003 \002(\003\022\017\n" +
+ "\007toSeqNr\030\004 \001(\003\"X\n\rUniqueAddress\022,\n\007addre" +
+ "ss\030\001 \002(\0132\033.akka.cluster.ddata.Address\022\013\n",
+ "\003uid\030\002 \002(\017\022\014\n\004uid2\030\003 \001(\017\")\n\007Address\022\020\n\010h" +
+ "ostname\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"\224\001\n\rVersionV" +
+ "ector\0228\n\007entries\030\001 \003(\0132\'.akka.cluster.dd" +
+ "ata.VersionVector.Entry\032I\n\005Entry\022/\n\004node" +
+ "\030\001 \002(\0132!.akka.cluster.ddata.UniqueAddres" +
+ "s\022\017\n\007version\030\002 \002(\003\"V\n\014OtherMessage\022\027\n\017en" +
+ "closedMessage\030\001 \002(\014\022\024\n\014serializerId\030\002 \002(" +
+ "\005\022\027\n\017messageManifest\030\004 \001(\014\"\036\n\nStringGSet" +
+ "\022\020\n\010elements\030\001 \003(\t\"\205\001\n\023DurableDataEnvelo" +
+ "pe\022.\n\004data\030\001 \002(\0132 .akka.cluster.ddata.Ot",
+ "herMessage\022>\n\007pruning\030\002 \003(\0132-.akka.clust" +
+ "er.ddata.DataEnvelope.PruningEntryB#\n\037ak" +
+ "ka.cluster.ddata.protobuf.msgH\001"
};
akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -17378,7 +19310,7 @@ public final class ReplicatorMessages {
internal_static_akka_cluster_ddata_DataEnvelope_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_DataEnvelope_descriptor,
- new java.lang.String[] { "Data", "Pruning", });
+ new java.lang.String[] { "Data", "Pruning", "DeltaVersions", });
internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor =
internal_static_akka_cluster_ddata_DataEnvelope_descriptor.getNestedTypes().get(0);
internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_fieldAccessorTable = new
@@ -17414,13 +19346,13 @@ public final class ReplicatorMessages {
internal_static_akka_cluster_ddata_DeltaPropagation_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_DeltaPropagation_descriptor,
- new java.lang.String[] { "Entries", });
+ new java.lang.String[] { "FromNode", "Entries", });
internal_static_akka_cluster_ddata_DeltaPropagation_Entry_descriptor =
internal_static_akka_cluster_ddata_DeltaPropagation_descriptor.getNestedTypes().get(0);
internal_static_akka_cluster_ddata_DeltaPropagation_Entry_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_DeltaPropagation_Entry_descriptor,
- new java.lang.String[] { "Key", "Envelope", });
+ new java.lang.String[] { "Key", "Envelope", "FromSeqNr", "ToSeqNr", });
internal_static_akka_cluster_ddata_UniqueAddress_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_akka_cluster_ddata_UniqueAddress_fieldAccessorTable = new
@@ -17433,20 +19365,32 @@ public final class ReplicatorMessages {
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_Address_descriptor,
new java.lang.String[] { "Hostname", "Port", });
- internal_static_akka_cluster_ddata_OtherMessage_descriptor =
+ internal_static_akka_cluster_ddata_VersionVector_descriptor =
getDescriptor().getMessageTypes().get(17);
+ internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable = new
+ akka.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_akka_cluster_ddata_VersionVector_descriptor,
+ new java.lang.String[] { "Entries", });
+ internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor =
+ internal_static_akka_cluster_ddata_VersionVector_descriptor.getNestedTypes().get(0);
+ internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable = new
+ akka.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor,
+ new java.lang.String[] { "Node", "Version", });
+ internal_static_akka_cluster_ddata_OtherMessage_descriptor =
+ getDescriptor().getMessageTypes().get(18);
internal_static_akka_cluster_ddata_OtherMessage_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_OtherMessage_descriptor,
new java.lang.String[] { "EnclosedMessage", "SerializerId", "MessageManifest", });
internal_static_akka_cluster_ddata_StringGSet_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_akka_cluster_ddata_StringGSet_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_StringGSet_descriptor,
new java.lang.String[] { "Elements", });
internal_static_akka_cluster_ddata_DurableDataEnvelope_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_akka_cluster_ddata_DurableDataEnvelope_fieldAccessorTable = new
akka.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_akka_cluster_ddata_DurableDataEnvelope_descriptor,
diff --git a/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto b/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto
index d56449264d..6a5020923c 100644
--- a/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto
+++ b/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto
@@ -21,7 +21,21 @@ message ORSet {
repeated sint32 intElements = 4 [packed=true];
repeated sint64 longElements = 5 [packed=true];
repeated OtherMessage otherElements = 6;
+}
+
+message ORSetDeltaGroup {
+ message Entry {
+ required ORSetDeltaOp operation = 1;
+ required ORSet underlying = 2;
+ }
+ repeated Entry entries = 1;
+}
+
+enum ORSetDeltaOp {
+ Add = 0;
+ Remove = 1;
+ Full = 2;
}
message Flag {
@@ -48,14 +62,6 @@ message PNCounter {
required GCounter decrements = 2;
}
-message VersionVector {
- message Entry {
- required UniqueAddress node = 1;
- required int64 version = 2;
- }
- repeated Entry entries = 1;
-}
-
message ORMap {
message Entry {
optional string stringKey = 1;
diff --git a/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto b/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto
index 4b1f2084b4..0582fac91b 100644
--- a/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto
+++ b/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto
@@ -73,6 +73,7 @@ message DataEnvelope {
required OtherMessage data = 1;
repeated PruningEntry pruning = 2;
+ optional VersionVector deltaVersions = 3;
}
message Status {
@@ -100,9 +101,12 @@ message DeltaPropagation {
message Entry {
required string key = 1;
required DataEnvelope envelope = 2;
+ required int64 fromSeqNr = 3;
+ optional int64 toSeqNr = 4; // if not set then same as fromSequenceNr
}
- repeated Entry entries = 1;
+ required UniqueAddress fromNode = 1;
+ repeated Entry entries = 2;
}
message UniqueAddress {
@@ -117,6 +121,14 @@ message Address {
required uint32 port = 2;
}
+message VersionVector {
+ message Entry {
+ required UniqueAddress node = 1;
+ required int64 version = 2;
+ }
+ repeated Entry entries = 1;
+}
+
message OtherMessage {
required bytes enclosedMessage = 1;
required int32 serializerId = 2;
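
The DeltaPropagation changes above add the sending node and a per-entry sequence number interval to the wire format. A hedged Scala sketch of how a receiver can interpret those fields (the case class names are hypothetical stand-ins, not Akka's internal messages):

    // Hypothetical stand-ins for the proto messages above, for illustration only.
    final case class EntrySketch(key: String, fromSeqNr: Long, toSeqNr: Option[Long])
    final case class DeltaPropagationSketch(fromNode: String, entries: Vector[EntrySketch])

    // toSeqNr is optional on the wire; when absent it equals fromSeqNr.
    def seqNrRange(e: EntrySketch): (Long, Long) =
      (e.fromSeqNr, e.toSeqNr.getOrElse(e.fromSeqNr))

    // A receiver that tracks the last sequence number applied per (fromNode, key)
    // can tell whether an incoming delta group connects to what it has already applied.
    def connects(lastAppliedSeqNr: Long, e: EntrySketch): Boolean =
      e.fromSeqNr <= lastAppliedSeqNr + 1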
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala
index a8c2dfdf5e..488b90cfa4 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala
@@ -4,48 +4,53 @@
package akka.cluster.ddata
import scala.collection.immutable.TreeMap
-import akka.cluster.ddata.Replicator.Internal.DeltaPropagation
+
import akka.actor.Address
-import akka.cluster.ddata.Replicator.Internal.DataEnvelope
+import akka.annotation.InternalApi
+import akka.cluster.ddata.Key.KeyId
+import akka.cluster.ddata.Replicator.Internal.DeltaPropagation
/**
* INTERNAL API: Used by the Replicator actor.
* Extracted to separate trait to make it easy to test.
*/
-private[akka] trait DeltaPropagationSelector {
+@InternalApi private[akka] trait DeltaPropagationSelector {
private var _propagationCount = 0L
def propagationCount: Long = _propagationCount
- private var deltaCounter = Map.empty[String, Long]
- private var deltaEntries = Map.empty[String, TreeMap[Long, ReplicatedData]]
- private var deltaSentToNode = Map.empty[String, Map[Address, Long]]
+ private var deltaCounter = Map.empty[KeyId, Long]
+ private var deltaEntries = Map.empty[KeyId, TreeMap[Long, ReplicatedData]]
+ private var deltaSentToNode = Map.empty[KeyId, Map[Address, Long]]
private var deltaNodeRoundRobinCounter = 0L
- def divisor: Int
+ def gossipIntervalDivisor: Int
def allNodes: Vector[Address]
- def createDeltaPropagation(deltas: Map[String, ReplicatedData]): DeltaPropagation
+ def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation
- def update(key: String, delta: ReplicatedData): Unit = {
- val c = deltaCounter.get(key) match {
- case Some(c) ⇒ c
- case None ⇒
- deltaCounter = deltaCounter.updated(key, 1L)
- 1L
- }
- val deltaEntriesForKey = deltaEntries.getOrElse(key, TreeMap.empty[Long, ReplicatedData])
- val updatedEntriesForKey =
- deltaEntriesForKey.get(c) match {
- case Some(existingDelta) ⇒
- deltaEntriesForKey.updated(c, existingDelta.merge(delta.asInstanceOf[existingDelta.T]))
- case None ⇒
- deltaEntriesForKey.updated(c, delta)
- }
- deltaEntries = deltaEntries.updated(key, updatedEntriesForKey)
+ def currentVersion(key: KeyId): Long = deltaCounter.get(key) match {
+ case Some(v) ⇒ v
+ case None ⇒ 0L
}
- def delete(key: String): Unit = {
+ def update(key: KeyId, delta: ReplicatedData): Unit = {
+ // bump the counter for each update
+ val version = deltaCounter.get(key) match {
+ case Some(c) ⇒ c + 1
+ case None ⇒ 1L
+ }
+ deltaCounter = deltaCounter.updated(key, version)
+
+ val deltaEntriesForKey = deltaEntries.get(key) match {
+ case Some(m) ⇒ m
+ case None ⇒ TreeMap.empty[Long, ReplicatedData]
+ }
+
+ deltaEntries = deltaEntries.updated(key, deltaEntriesForKey.updated(version, delta))
+ }
+
+ def delete(key: KeyId): Unit = {
deltaEntries -= key
deltaCounter -= key
deltaSentToNode -= key
@@ -53,7 +58,7 @@ private[akka] trait DeltaPropagationSelector {
def nodesSliceSize(allNodesSize: Int): Int = {
// 2 - 10 nodes
- math.min(math.max((allNodesSize / divisor) + 1, 2), math.min(allNodesSize, 10))
+ math.min(math.max((allNodesSize / gossipIntervalDivisor) + 1, 2), math.min(allNodesSize, 10))
}
def collectPropagations(): Map[Address, DeltaPropagation] = {
@@ -80,20 +85,32 @@ private[akka] trait DeltaPropagationSelector {
var result = Map.empty[Address, DeltaPropagation]
+ var cache = Map.empty[(KeyId, Long, Long), ReplicatedData]
slice.foreach { node ⇒
// collect the deltas that have not already been sent to the node and merge
// them into a delta group
- var deltas = Map.empty[String, ReplicatedData]
+ var deltas = Map.empty[KeyId, (ReplicatedData, Long, Long)]
deltaEntries.foreach {
case (key, entries) ⇒
val deltaSentToNodeForKey = deltaSentToNode.getOrElse(key, TreeMap.empty[Address, Long])
val j = deltaSentToNodeForKey.getOrElse(node, 0L)
val deltaEntriesAfterJ = deltaEntriesAfter(entries, j)
if (deltaEntriesAfterJ.nonEmpty) {
- val deltaGroup = deltaEntriesAfterJ.valuesIterator.reduceLeft {
- (d1, d2) ⇒ d1.merge(d2.asInstanceOf[d1.T])
+ val fromSeqNr = deltaEntriesAfterJ.head._1
+ val toSeqNr = deltaEntriesAfterJ.last._1
+ // in most cases the delta group merging will be the same for each node,
+ // so we cache the merged results
+ val cacheKey = (key, fromSeqNr, toSeqNr)
+ val deltaGroup = cache.get(cacheKey) match {
+ case None ⇒
+ val group = deltaEntriesAfterJ.valuesIterator.reduceLeft {
+ (d1, d2) ⇒ d1.merge(d2.asInstanceOf[d1.T])
+ }
+ cache = cache.updated(cacheKey, group)
+ group
+ case Some(group) ⇒ group
}
- deltas = deltas.updated(key, deltaGroup)
+ deltas = deltas.updated(key, (deltaGroup, fromSeqNr, toSeqNr))
deltaSentToNode = deltaSentToNode.updated(key, deltaSentToNodeForKey.updated(node, deltaEntriesAfterJ.lastKey))
}
}
@@ -106,15 +123,6 @@ private[akka] trait DeltaPropagationSelector {
}
}
- // increase the counter
- deltaCounter = deltaCounter.map {
- case (key, value) ⇒
- if (deltaEntries.contains(key))
- key → (value + 1)
- else
- key → value
- }
-
result
}
}
@@ -126,14 +134,14 @@ private[akka] trait DeltaPropagationSelector {
case ntrs ⇒ ntrs
}
- def hasDeltaEntries(key: String): Boolean = {
+ def hasDeltaEntries(key: KeyId): Boolean = {
deltaEntries.get(key) match {
case Some(m) ⇒ m.nonEmpty
case None ⇒ false
}
}
- private def findSmallestVersionPropagatedToAllNodes(key: String, all: Vector[Address]): Long = {
+ private def findSmallestVersionPropagatedToAllNodes(key: KeyId, all: Vector[Address]): Long = {
deltaSentToNode.get(key) match {
case None ⇒ 0L
case Some(deltaSentToNodeForKey) ⇒
@@ -154,7 +162,7 @@ private[akka] trait DeltaPropagationSelector {
val deltaEntriesAfterMin = deltaEntriesAfter(entries, minVersion)
- // TODO perhaps also remove oldest when deltaCounter are too far ahead (e.g. 10 cylces)
+ // TODO perhaps also remove oldest when deltaCounter is too far ahead (e.g. 10 cycles)
key → deltaEntriesAfterMin
}
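
The selector above keeps one TreeMap of deltas per key, indexed by a monotonically increasing version, and remembers per node which version has already been sent. A stripped-down, single-key model of that bookkeeping (plain Scala using only the standard library; a sketch, not the Akka trait itself):

    import scala.collection.immutable.TreeMap

    // Simplified single-key model of the bookkeeping in DeltaPropagationSelector.
    final class DeltaBuffer[D](merge: (D, D) => D) {
      private var counter = 0L
      private var entries = TreeMap.empty[Long, D]
      private var sentTo = Map.empty[String, Long] // node -> last version sent

      def update(delta: D): Unit = {
        counter += 1
        entries = entries.updated(counter, delta)
      }

      // Merged delta group for a node plus its seqNr range, or None if up to date.
      def collect(node: String): Option[(D, Long, Long)] = {
        val last = sentTo.getOrElse(node, 0L)
        val after = entries.rangeImpl(Some(last + 1), None)
        if (after.isEmpty) None
        else {
          sentTo = sentTo.updated(node, after.lastKey)
          Some((after.valuesIterator.reduceLeft(merge), after.firstKey, after.lastKey))
        }
      }
    }

The real trait does the same per key and node, and additionally caches the merged group per (key, fromSeqNr, toSeqNr) since most nodes in a propagation round need the same slice.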
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala
index 04f744936e..edccca3e86 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala
@@ -18,8 +18,9 @@ import akka.actor.ActorRef
import akka.actor.DeadLetterSuppression
import akka.actor.Props
import akka.cluster.Cluster
-import akka.cluster.ddata.Replicator.ReplicatorMessage
+import akka.cluster.ddata.Key.KeyId
import akka.cluster.ddata.Replicator.Internal.DataEnvelope
+import akka.cluster.ddata.Replicator.ReplicatorMessage
import akka.io.DirectByteBufferPool
import akka.serialization.SerializationExtension
import akka.serialization.SerializerWithStringManifest
@@ -53,7 +54,7 @@ object DurableStore {
* should be used to signal success or failure of the operation to the contained
* `replyTo` actor.
*/
- final case class Store(key: String, data: DurableDataEnvelope, reply: Option[StoreReply])
+ final case class Store(key: KeyId, data: DurableDataEnvelope, reply: Option[StoreReply])
final case class StoreReply(successMsg: Any, failureMsg: Any, replyTo: ActorRef)
/**
@@ -66,7 +67,7 @@ object DurableStore {
* will stop itself and the durable store.
*/
case object LoadAll
- final case class LoadData(data: Map[String, DurableDataEnvelope])
+ final case class LoadData(data: Map[KeyId, DurableDataEnvelope])
case object LoadAllCompleted
class LoadFailed(message: String, cause: Throwable) extends RuntimeException(message, cause) {
def this(message: String) = this(message, null)
@@ -143,7 +144,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
}
// pending write behind
- val pending = new java.util.HashMap[String, DurableDataEnvelope]
+ val pending = new java.util.HashMap[KeyId, DurableDataEnvelope]
override def postRestart(reason: Throwable): Unit = {
super.postRestart(reason)
@@ -227,7 +228,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
writeBehind()
}
- def dbPut(tx: OptionVal[Txn[ByteBuffer]], key: String, data: DurableDataEnvelope): Unit = {
+ def dbPut(tx: OptionVal[Txn[ByteBuffer]], key: KeyId, data: DurableDataEnvelope): Unit = {
try {
keyBuffer.put(key.getBytes(ByteString.UTF_8)).flip()
val value = serializer.toBinary(data)
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/FastMerge.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/FastMerge.scala
index 54300bbfbc..b7fde3de88 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/FastMerge.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/FastMerge.scala
@@ -3,6 +3,8 @@
*/
package akka.cluster.ddata
+import akka.annotation.InternalApi
+
/**
* INTERNAL API
*
@@ -19,11 +21,11 @@ package akka.cluster.ddata
* i.e. if used outside the Replicator infrastructure, but the worst thing that can happen is that
* a full merge is performed instead of the fast forward merge.
*/
-private[akka] trait FastMerge { self: ReplicatedData ⇒
+@InternalApi private[akka] trait FastMerge { self: ReplicatedData ⇒
private var ancestor: FastMerge = null
- /** INTERNAL API: should be called from "updating" methods */
+ /** INTERNAL API: should be called from "updating" methods, and `resetDelta` */
private[akka] def assignAncestor(newData: T with FastMerge): T = {
newData.ancestor = if (this.ancestor eq null) this else this.ancestor
this.ancestor = null // only one level, for GC
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
index 58c2243d7d..89a73d8d8b 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
@@ -6,6 +6,7 @@ package akka.cluster.ddata
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
import java.math.BigInteger
+import akka.annotation.InternalApi
object GCounter {
val empty: GCounter = new GCounter
@@ -39,13 +40,15 @@ object GCounter {
*/
@SerialVersionUID(1L)
final class GCounter private[akka] (
- private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty,
- private[akka] val _delta: Option[GCounter] = None)
- extends DeltaReplicatedData with ReplicatedDataSerialization with RemovedNodePruning with FastMerge {
+ private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty,
+ override val delta: Option[GCounter] = None)
+ extends DeltaReplicatedData with ReplicatedDelta
+ with ReplicatedDataSerialization with RemovedNodePruning with FastMerge {
import GCounter.Zero
type T = GCounter
+ type D = GCounter
/**
* Scala API: Current total value of the counter.
@@ -73,12 +76,12 @@ final class GCounter private[akka] (
/**
* INTERNAL API
*/
- private[akka] def increment(key: UniqueAddress): GCounter = increment(key, 1)
+ @InternalApi private[akka] def increment(key: UniqueAddress): GCounter = increment(key, 1)
/**
* INTERNAL API
*/
- private[akka] def increment(key: UniqueAddress, n: BigInt): GCounter = {
+ @InternalApi private[akka] def increment(key: UniqueAddress, n: BigInt): GCounter = {
require(n >= 0, "Can't decrement a GCounter")
if (n == 0) this
else {
@@ -86,11 +89,11 @@ final class GCounter private[akka] (
case Some(v) ⇒ v + n
case None ⇒ n
}
- val newDelta = _delta match {
- case Some(d) ⇒ Some(new GCounter(d.state + (key → nextValue)))
- case None ⇒ Some(new GCounter(Map(key → nextValue)))
+ val newDelta = delta match {
+ case None ⇒ new GCounter(Map(key → nextValue))
+ case Some(d) ⇒ new GCounter(d.state + (key → nextValue))
}
- assignAncestor(new GCounter(state + (key → nextValue), newDelta))
+ assignAncestor(new GCounter(state + (key → nextValue), Some(newDelta)))
}
}
@@ -108,12 +111,13 @@ final class GCounter private[akka] (
new GCounter(merged)
}
- override def delta: GCounter = _delta match {
- case Some(d) ⇒ d
- case None ⇒ GCounter.empty
- }
+ override def mergeDelta(thatDelta: GCounter): GCounter = merge(thatDelta)
- override def resetDelta: GCounter = new GCounter(state)
+ override def zero: GCounter = GCounter.empty
+
+ override def resetDelta: GCounter =
+ if (delta.isEmpty) this
+ else assignAncestor(new GCounter(state))
override def modifiedByNodes: Set[UniqueAddress] = state.keySet
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala
index a566937da0..b6f9447ca8 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala
@@ -29,10 +29,12 @@ object GSet {
* This class is immutable, i.e. "modifying" methods return a new instance.
*/
@SerialVersionUID(1L)
-final case class GSet[A] private (elements: Set[A])(_delta: Option[GSet[A]])
- extends DeltaReplicatedData with ReplicatedDataSerialization with FastMerge {
+final case class GSet[A] private (elements: Set[A])(override val delta: Option[GSet[A]])
+ extends DeltaReplicatedData with ReplicatedDelta
+ with ReplicatedDataSerialization with FastMerge {
type T = GSet[A]
+ type D = GSet[A]
/**
* Java API
@@ -57,7 +59,7 @@ final case class GSet[A] private (elements: Set[A])(_delta: Option[GSet[A]])
* Adds an element to the set
*/
def add(element: A): GSet[A] = {
- val newDelta = _delta match {
+ val newDelta = delta match {
case Some(e) ⇒ Some(new GSet(e.elements + element)(None))
case None ⇒ Some(new GSet[A](Set.apply[A](element))(None))
}
@@ -72,16 +74,17 @@ final case class GSet[A] private (elements: Set[A])(_delta: Option[GSet[A]])
new GSet[A](elements union that.elements)(None)
}
- override def delta: GSet[A] = _delta match {
- case Some(d) ⇒ d
- case None ⇒ GSet.empty[A]
- }
+ override def mergeDelta(thatDelta: GSet[A]): GSet[A] = merge(thatDelta)
- override def resetDelta: GSet[A] = new GSet[A](elements)(None)
+ override def zero: GSet[A] = GSet.empty
+
+ override def resetDelta: GSet[A] =
+ if (delta.isEmpty) this
+ else assignAncestor(new GSet[A](elements)(None))
override def toString: String = s"G$elements"
- def copy(e: Set[A] = elements) = new GSet[A](e)(_delta)
+ def copy(e: Set[A] = elements) = new GSet[A](e)(delta)
}
object GSetKey {
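
With `delta` now exposed as an Option on the data type itself, the writer-side lifecycle for a state-based delta type such as GSet looks roughly like this (a sketch using only the public API visible in the changes above):

    import akka.cluster.ddata.GSet

    val updated = GSet.empty[String] + "a" + "b"

    // The accumulated delta covers both additions; for GSet the delta is itself a GSet.
    val delta: Option[GSet[String]] = updated.delta

    // After handing the delta to the Replicator for propagation, resetDelta returns
    // the same elements with an empty delta (and records the ancestor for FastMerge).
    val afterPropagation = updated.resetDelta

    // On the receiving side a delta is applied with mergeDelta, which for GSet
    // is simply a full-state merge.
    val remote = GSet.empty[String] + "c"
    val merged = delta.fold(remote)(remote.mergeDelta)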
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala
index 82fb28ed24..e6444322dd 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala
@@ -11,6 +11,8 @@ object Key {
private[akka]type KeyR = Key[ReplicatedData]
+ type KeyId = String
+
}
/**
@@ -21,7 +23,7 @@ object Key {
* Specific classes are provided for the built in data types, e.g. [[ORSetKey]],
* and you can create your own keys.
*/
-abstract class Key[+T <: ReplicatedData](val id: String) extends Serializable {
+abstract class Key[+T <: ReplicatedData](val id: Key.KeyId) extends Serializable {
override final def equals(o: Any): Boolean = o match {
case k: Key[_] ⇒ id == k.id
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala
index 9a2c15a050..e36a521757 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala
@@ -5,6 +5,7 @@ package akka.cluster.ddata
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
+import akka.annotation.InternalApi
object LWWMap {
private val _empty: LWWMap[Any, Any] = new LWWMap(ORMap.empty)
@@ -111,7 +112,7 @@ final class LWWMap[A, B] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def put(node: UniqueAddress, key: A, value: B, clock: Clock[B]): LWWMap[A, B] = {
+ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: B, clock: Clock[B]): LWWMap[A, B] = {
val newRegister = underlying.get(key) match {
case Some(r) ⇒ r.withValue(node, value, clock)
case None ⇒ LWWRegister(node, value, clock)
@@ -137,7 +138,7 @@ final class LWWMap[A, B] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def remove(node: UniqueAddress, key: A): LWWMap[A, B] =
+ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): LWWMap[A, B] =
new LWWMap(underlying.remove(node, key))
override def merge(that: LWWMap[A, B]): LWWMap[A, B] =
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala
index e26a763b0f..478a8d45b4 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala
@@ -6,6 +6,7 @@ package akka.cluster.ddata
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
import akka.util.HashCode
+import akka.annotation.InternalApi
object LWWRegister {
@@ -43,7 +44,7 @@ object LWWRegister {
/**
* INTERNAL API
*/
- private[akka] def apply[A](node: UniqueAddress, initialValue: A, clock: Clock[A]): LWWRegister[A] =
+ @InternalApi private[akka] def apply[A](node: UniqueAddress, initialValue: A, clock: Clock[A]): LWWRegister[A] =
new LWWRegister(node, initialValue, clock(0L, initialValue))
def apply[A](initialValue: A)(implicit node: Cluster, clock: Clock[A] = defaultClock[A]): LWWRegister[A] =
@@ -148,7 +149,7 @@ final class LWWRegister[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def withValue(node: UniqueAddress, value: A, clock: Clock[A]): LWWRegister[A] =
+ @InternalApi private[akka] def withValue(node: UniqueAddress, value: A, clock: Clock[A]): LWWRegister[A] =
new LWWRegister(node, value, clock(timestamp, value))
override def merge(that: LWWRegister[A]): LWWRegister[A] =
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
index 309d052ddc..b8cf38edd4 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
@@ -6,6 +6,7 @@ package akka.cluster.ddata
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
import akka.util.HashCode
+import akka.annotation.InternalApi
object ORMap {
private val _empty: ORMap[Any, ReplicatedData] = new ORMap(ORSet.empty, Map.empty)
@@ -93,7 +94,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def put(node: UniqueAddress, key: A, value: B): ORMap[A, B] =
+ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: B): ORMap[A, B] =
if (value.isInstanceOf[ORSet[_]] && values.contains(key))
throw new IllegalArgumentException(
"`ORMap.put` must not be used to replace an existing `ORSet` " +
@@ -123,7 +124,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def updated(node: UniqueAddress, key: A, initial: B)(modify: B ⇒ B): ORMap[A, B] = {
+ @InternalApi private[akka] def updated(node: UniqueAddress, key: A, initial: B)(modify: B ⇒ B): ORMap[A, B] = {
val newValue = values.get(key) match {
case Some(old) ⇒ modify(old)
case _ ⇒ modify(initial)
@@ -148,7 +149,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def remove(node: UniqueAddress, key: A): ORMap[A, B] = {
+ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): ORMap[A, B] = {
new ORMap(keys.remove(node, key), values - key)
}
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
index 4bfea5b4f4..d95d04409b 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
@@ -4,6 +4,7 @@
package akka.cluster.ddata
import akka.cluster.{ UniqueAddress, Cluster }
+import akka.annotation.InternalApi
object ORMultiMap {
@@ -113,7 +114,7 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
/**
* INTERNAL API
*/
- private[akka] def put(node: UniqueAddress, key: A, value: Set[B]): ORMultiMap[A, B] = {
+ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: Set[B]): ORMultiMap[A, B] = {
val newUnderlying = underlying.updated(node, key, ORSet.empty[B]) { existing ⇒
value.foldLeft(existing.clear(node)) { (s, element) ⇒ s.add(node, element) }
}
@@ -136,7 +137,7 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
/**
* INTERNAL API
*/
- private[akka] def remove(node: UniqueAddress, key: A): ORMultiMap[A, B] =
+ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): ORMultiMap[A, B] =
new ORMultiMap(underlying.remove(node, key))
/**
@@ -154,7 +155,7 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
/**
* INTERNAL API
*/
- private[akka] def addBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = {
+ @InternalApi private[akka] def addBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = {
val newUnderlying = underlying.updated(node, key, ORSet.empty[B])(_.add(node, element))
new ORMultiMap(newUnderlying)
}
@@ -176,7 +177,7 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
/**
* INTERNAL API
*/
- private[akka] def removeBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = {
+ @InternalApi private[akka] def removeBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = {
val newUnderlying = {
val u = underlying.updated(node, key, ORSet.empty[B])(_.remove(node, element))
u.get(key) match {
@@ -198,7 +199,7 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
/**
* INTERNAL API
*/
- private[akka] def replaceBinding(node: UniqueAddress, key: A, oldElement: B, newElement: B): ORMultiMap[A, B] =
+ @InternalApi private[akka] def replaceBinding(node: UniqueAddress, key: A, oldElement: B, newElement: B): ORMultiMap[A, B] =
if (newElement != oldElement)
addBinding(node, key, newElement).removeBinding(node, key, oldElement)
else
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
index 17b37d018e..2c40a3ead3 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
@@ -4,10 +4,12 @@
package akka.cluster.ddata
import scala.annotation.tailrec
+import scala.collection.immutable
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
import akka.util.HashCode
+import akka.annotation.InternalApi
object ORSet {
private val _empty: ORSet[Any] = new ORSet(Map.empty, VersionVector.empty)
@@ -34,7 +36,75 @@ object ORSet {
/**
* INTERNAL API
*/
- private[akka]type Dot = VersionVector
+ @InternalApi private[akka]type Dot = VersionVector
+
+ sealed trait DeltaOp extends ReplicatedDelta with RequiresCausalDeliveryOfDeltas {
+ type T = DeltaOp
+ }
+
+ /**
+ * INTERNAL API
+ */
+ @InternalApi private[akka] sealed abstract class AtomicDeltaOp[A] extends DeltaOp {
+ def underlying: ORSet[A]
+ override def zero: ORSet[A] = ORSet.empty
+ }
+
+ /** INTERNAL API */
+ @InternalApi private[akka] final case class AddDeltaOp[A](underlying: ORSet[A]) extends AtomicDeltaOp[A] {
+
+ override def merge(that: DeltaOp): DeltaOp = that match {
+ case AddDeltaOp(u) ⇒
+ // Note that we only merge deltas originating from the same node
+ AddDeltaOp(new ORSet(
+ concatElementsMap(u.elementsMap.asInstanceOf[Map[A, Dot]]),
+ underlying.vvector.merge(u.vvector)))
+ case _: AtomicDeltaOp[A] ⇒ DeltaGroup(Vector(this, that))
+ case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
+ }
+
+ private def concatElementsMap(thatMap: Map[A, Dot]): Map[A, Dot] = {
+ if (thatMap.size == 1) {
+ val head = thatMap.head
+ underlying.elementsMap.updated(head._1, head._2)
+ } else
+ underlying.elementsMap ++ thatMap
+ }
+ }
+
+ /** INTERNAL API */
+ @InternalApi private[akka] final case class RemoveDeltaOp[A](underlying: ORSet[A]) extends AtomicDeltaOp[A] {
+ if (underlying.size != 1)
+ throw new IllegalArgumentException(s"RemoveDeltaOp should contain one removed element, but was $underlying")
+
+ override def merge(that: DeltaOp): DeltaOp = that match {
+ case _: AtomicDeltaOp[A] ⇒ DeltaGroup(Vector(this, that)) // keep it simple for removals
+ case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
+ }
+ }
+
+ /** INTERNAL API: Used for `clear` but could be used for other cases also */
+ @InternalApi private[akka] final case class FullStateDeltaOp[A](underlying: ORSet[A]) extends AtomicDeltaOp[A] {
+ override def merge(that: DeltaOp): DeltaOp = that match {
+ case _: AtomicDeltaOp[A] ⇒ DeltaGroup(Vector(this, that))
+ case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
+ }
+ }
+
+ final case class DeltaGroup[A](ops: immutable.IndexedSeq[DeltaOp]) extends DeltaOp {
+ override def merge(that: DeltaOp): DeltaOp = that match {
+ case thatAdd: AddDeltaOp[A] ⇒
+ // merge AddDeltaOp into last AddDeltaOp in the group, if possible
+ ops.last match {
+ case thisAdd: AddDeltaOp[A] ⇒ DeltaGroup(ops.dropRight(1) :+ thisAdd.merge(thatAdd))
+ case _ ⇒ DeltaGroup(ops :+ thatAdd)
+ }
+ case DeltaGroup(thatOps) ⇒ DeltaGroup(ops ++ thatOps)
+ case _ ⇒ DeltaGroup(ops :+ that)
+ }
+
+ override def zero: ORSet[A] = ORSet.empty
+ }
/**
* INTERNAL API
@@ -45,7 +115,7 @@ object ORSet {
* [{a, 4}, {b, 1}, {c, 1}, {d, 14}, {e, 5}, {f, 2}] =
* [{b, 2}, {g, 22}]
*/
- private[akka] def subtractDots(dot: Dot, vvector: VersionVector): Dot = {
+ @InternalApi private[akka] def subtractDots(dot: Dot, vvector: VersionVector): Dot = {
@tailrec def dropDots(remaining: List[(UniqueAddress, Long)], acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] =
remaining match {
@@ -80,7 +150,7 @@ object ORSet {
* INTERNAL API
* @see [[ORSet#merge]]
*/
- private[akka] def mergeCommonKeys[A](commonKeys: Set[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] =
+ @InternalApi private[akka] def mergeCommonKeys[A](commonKeys: Set[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] =
mergeCommonKeys(commonKeys.iterator, lhs, rhs)
private def mergeCommonKeys[A](commonKeys: Iterator[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = {
@@ -149,8 +219,9 @@ object ORSet {
* INTERNAL API
* @see [[ORSet#merge]]
*/
- private[akka] def mergeDisjointKeys[A](keys: Set[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector,
- accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] =
+ @InternalApi private[akka] def mergeDisjointKeys[A](
+ keys: Set[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector,
+ accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] =
mergeDisjointKeys(keys.iterator, elementsMap, vvector, accumulator)
private def mergeDisjointKeys[A](keys: Iterator[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector,
@@ -201,10 +272,15 @@ object ORSet {
@SerialVersionUID(1L)
final class ORSet[A] private[akka] (
private[akka] val elementsMap: Map[A, ORSet.Dot],
- private[akka] val vvector: VersionVector)
- extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning with FastMerge {
+ private[akka] val vvector: VersionVector,
+ override val delta: Option[ORSet.DeltaOp] = None)
+ extends DeltaReplicatedData
+ with ReplicatedDataSerialization with RemovedNodePruning with FastMerge {
+
+ import ORSet.{ AddDeltaOp, RemoveDeltaOp }
type T = ORSet[A]
+ type D = ORSet.DeltaOp
/**
* Scala API
@@ -238,10 +314,18 @@ final class ORSet[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def add(node: UniqueAddress, element: A): ORSet[A] = {
+ @InternalApi private[akka] def add(node: UniqueAddress, element: A): ORSet[A] = {
val newVvector = vvector + node
val newDot = VersionVector(node, newVvector.versionAt(node))
- assignAncestor(new ORSet(elementsMap = elementsMap.updated(element, newDot), vvector = newVvector))
+ val newDelta = delta match {
+ case None ⇒
+ ORSet.AddDeltaOp(new ORSet(Map(element → newDot), newDot))
+ case Some(existing: ORSet.AddDeltaOp[A]) ⇒
+ existing.merge(ORSet.AddDeltaOp(new ORSet(Map(element → newDot), newDot)))
+ case Some(d) ⇒
+ d.merge(ORSet.AddDeltaOp(new ORSet(Map(element → newDot), newDot)))
+ }
+ assignAncestor(new ORSet(elementsMap.updated(element, newDot), newVvector, Some(newDelta)))
}
/**
@@ -257,8 +341,15 @@ final class ORSet[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def remove(node: UniqueAddress, element: A): ORSet[A] =
- assignAncestor(copy(elementsMap = elementsMap - element))
+ @InternalApi private[akka] def remove(node: UniqueAddress, element: A): ORSet[A] = {
+ val deltaDot = VersionVector(node, vvector.versionAt(node))
+ val rmOp = ORSet.RemoveDeltaOp(new ORSet(Map(element → deltaDot), vvector))
+ val newDelta = delta match {
+ case None ⇒ rmOp
+ case Some(d) ⇒ d.merge(rmOp)
+ }
+ assignAncestor(copy(elementsMap = elementsMap - element, delta = Some(newDelta)))
+ }
/**
* Removes all elements from the set, but keeps the history.
@@ -270,8 +361,15 @@ final class ORSet[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def clear(node: UniqueAddress): ORSet[A] =
- assignAncestor(copy(elementsMap = Map.empty))
+ @InternalApi private[akka] def clear(node: UniqueAddress): ORSet[A] = {
+ val newFullState = new ORSet[A](elementsMap = Map.empty, vvector)
+ val clearOp = ORSet.FullStateDeltaOp(newFullState)
+ val newDelta = delta match {
+ case None ⇒ clearOp
+ case Some(d) ⇒ d.merge(clearOp)
+ }
+ assignAncestor(newFullState.copy(delta = Some(newDelta)))
+ }
/**
* When element is in this Set but not in that Set:
@@ -289,24 +387,72 @@ final class ORSet[A] private[akka] (
override def merge(that: ORSet[A]): ORSet[A] = {
if ((this eq that) || that.isAncestorOf(this)) this.clearAncestor()
else if (this.isAncestorOf(that)) that.clearAncestor()
- else {
- val commonKeys =
- if (this.elementsMap.size < that.elementsMap.size)
- this.elementsMap.keysIterator.filter(that.elementsMap.contains)
- else
- that.elementsMap.keysIterator.filter(this.elementsMap.contains)
- val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that)
- val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains)
- val entries0 = ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00)
- val thatUniqueKeys = that.elementsMap.keysIterator.filterNot(this.elementsMap.contains)
- val entries = ORSet.mergeDisjointKeys(thatUniqueKeys, that.elementsMap, this.vvector, entries0)
- val mergedVvector = this.vvector.merge(that.vvector)
+ else dryMerge(that, addDeltaOp = false)
+ }
- clearAncestor()
- new ORSet(entries, mergedVvector)
+ // share merge impl between full state merge and AddDeltaOp merge
+ private def dryMerge(that: ORSet[A], addDeltaOp: Boolean): ORSet[A] = {
+ val commonKeys =
+ if (this.elementsMap.size < that.elementsMap.size)
+ this.elementsMap.keysIterator.filter(that.elementsMap.contains)
+ else
+ that.elementsMap.keysIterator.filter(this.elementsMap.contains)
+ val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that)
+ val entries0 =
+ if (addDeltaOp)
+ entries00 ++ this.elementsMap.filter { case (elem, _) ⇒ !that.elementsMap.contains(elem) }
+ else {
+ val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains)
+ ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00)
+ }
+ val thatUniqueKeys = that.elementsMap.keysIterator.filterNot(this.elementsMap.contains)
+ val entries = ORSet.mergeDisjointKeys(thatUniqueKeys, that.elementsMap, this.vvector, entries0)
+ val mergedVvector = this.vvector.merge(that.vvector)
+
+ clearAncestor()
+ new ORSet(entries, mergedVvector)
+ }
+
+ override def mergeDelta(thatDelta: ORSet.DeltaOp): ORSet[A] = {
+ thatDelta match {
+ case d: ORSet.AddDeltaOp[A] ⇒ dryMerge(d.underlying, addDeltaOp = true)
+ case d: ORSet.RemoveDeltaOp[A] ⇒ mergeRemoveDelta(d)
+ case d: ORSet.FullStateDeltaOp[A] ⇒ dryMerge(d.underlying, addDeltaOp = false)
+ case ORSet.DeltaGroup(ops) ⇒
+ ops.foldLeft(this) {
+ case (acc, op: ORSet.AddDeltaOp[A]) ⇒ acc.dryMerge(op.underlying, addDeltaOp = true)
+ case (acc, op: ORSet.RemoveDeltaOp[A]) ⇒ acc.mergeRemoveDelta(op)
+ case (acc, op: ORSet.FullStateDeltaOp[A]) ⇒ acc.dryMerge(op.underlying, addDeltaOp = false)
+ case (acc, op: ORSet.DeltaGroup[A]) ⇒
+ throw new IllegalArgumentException("ORSet.DeltaGroup should not be nested")
+ }
}
}
+ private def mergeRemoveDelta(thatDelta: ORSet.RemoveDeltaOp[A]): ORSet[A] = {
+ val that = thatDelta.underlying
+ val (elem, thatDot) = that.elementsMap.head
+ val newElementsMap =
+ if (that.vvector > vvector || that.vvector == vvector)
+ elementsMap - elem
+ else {
+ elementsMap.get(elem) match {
+ case Some(thisDot) ⇒
+ if (thatDot == thisDot || thatDot > thisDot) elementsMap - elem
+ else elementsMap
+ case None ⇒
+ elementsMap
+ }
+ }
+ clearAncestor()
+ val newVvector = vvector.merge(that.vvector)
+ new ORSet(newElementsMap, newVvector)
+ }
+
+ override def resetDelta: ORSet[A] =
+ if (delta.isEmpty) this
+ else assignAncestor(new ORSet(elementsMap, vvector))
+
override def modifiedByNodes: Set[UniqueAddress] =
vvector.modifiedByNodes
@@ -339,8 +485,9 @@ final class ORSet[A] private[akka] (
new ORSet(updated, vvector.pruningCleanup(removedNode))
}
- private def copy(elementsMap: Map[A, ORSet.Dot] = this.elementsMap, vvector: VersionVector = this.vvector): ORSet[A] =
- new ORSet(elementsMap, vvector)
+ private def copy(elementsMap: Map[A, ORSet.Dot] = this.elementsMap, vvector: VersionVector = this.vvector,
+ delta: Option[ORSet.DeltaOp] = this.delta): ORSet[A] =
+ new ORSet(elementsMap, vvector, delta)
// this class cannot be a `case class` because we need different `unapply`
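
A minimal usage sketch (editor's illustration, not part of this patch) of the delta path added above for ORSet: the mutators accumulate an ORSet.DeltaOp, and a receiving replica applies it with mergeDelta instead of merging the whole set.

    import akka.cluster.ddata.ORSet

    // apply the accumulated delta of `updated` to `local`; fall back to a
    // full-state merge when no delta has been collected
    def propagate(local: ORSet[String], updated: ORSet[String]): ORSet[String] =
      updated.delta match {
        case Some(d) ⇒ local.mergeDelta(d)
        case None    ⇒ local.merge(updated)
      }
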
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
index 6bf92fdfd8..55e77eb053 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
@@ -39,9 +39,11 @@ object PNCounter {
@SerialVersionUID(1L)
final class PNCounter private[akka] (
private[akka] val increments: GCounter, private[akka] val decrements: GCounter)
- extends DeltaReplicatedData with ReplicatedDataSerialization with RemovedNodePruning {
+ extends DeltaReplicatedData with ReplicatedDelta
+ with ReplicatedDataSerialization with RemovedNodePruning {
type T = PNCounter
+ type D = PNCounter
/**
* Scala API: Current total value of the counter.
@@ -94,9 +96,29 @@ final class PNCounter private[akka] (
increments = that.increments.merge(this.increments),
decrements = that.decrements.merge(this.decrements))
- override def delta: PNCounter = new PNCounter(increments.delta, decrements.delta)
+ override def delta: Option[PNCounter] = {
+ if (increments.delta.isEmpty && decrements.delta.isEmpty)
+ None
+ else {
+ val incrementsDelta = increments.delta match {
+ case Some(d) ⇒ d
+ case None ⇒ GCounter.empty
+ }
+ val decrementsDelta = decrements.delta match {
+ case Some(d) ⇒ d
+ case None ⇒ GCounter.empty
+ }
+ Some(new PNCounter(incrementsDelta, decrementsDelta))
+ }
+ }
- override def resetDelta: PNCounter = new PNCounter(increments.resetDelta, decrements.resetDelta)
+ override def mergeDelta(thatDelta: PNCounter): PNCounter = merge(thatDelta)
+
+ override def zero: PNCounter = PNCounter.empty
+
+ override def resetDelta: PNCounter =
+ if (increments.delta.isEmpty && decrements.delta.isEmpty) this
+ else new PNCounter(increments.resetDelta, decrements.resetDelta)
override def modifiedByNodes: Set[UniqueAddress] =
increments.modifiedByNodes union decrements.modifiedByNodes
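
A small sketch (editor's note, not part of this patch): because PNCounter declares its delta type D to be PNCounter itself, applying a received delta is just an ordinary merge.

    import akka.cluster.ddata.PNCounter

    def applyDelta(local: PNCounter, delta: Option[PNCounter]): PNCounter =
      delta match {
        case Some(d) ⇒ local.mergeDelta(d) // equivalent to local.merge(d)
        case None    ⇒ local
      }
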
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala
index 301800a3df..6fed4cc47c 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala
@@ -6,6 +6,7 @@ package akka.cluster.ddata
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
import java.math.BigInteger
+import akka.annotation.InternalApi
object PNCounterMap {
def empty[A]: PNCounterMap[A] = new PNCounterMap(ORMap.empty)
@@ -75,7 +76,7 @@ final class PNCounterMap[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def increment(node: UniqueAddress, key: A, delta: Long): PNCounterMap[A] =
+ @InternalApi private[akka] def increment(node: UniqueAddress, key: A, delta: Long): PNCounterMap[A] =
new PNCounterMap(underlying.updated(node, key, PNCounter())(_.increment(node, delta)))
/**
@@ -95,7 +96,7 @@ final class PNCounterMap[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def decrement(node: UniqueAddress, key: A, delta: Long): PNCounterMap[A] = {
+ @InternalApi private[akka] def decrement(node: UniqueAddress, key: A, delta: Long): PNCounterMap[A] = {
new PNCounterMap(underlying.updated(node, key, PNCounter())(_.decrement(node, delta)))
}
@@ -117,7 +118,7 @@ final class PNCounterMap[A] private[akka] (
/**
* INTERNAL API
*/
- private[akka] def remove(node: UniqueAddress, key: A): PNCounterMap[A] =
+ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): PNCounterMap[A] =
new PNCounterMap(underlying.remove(node, key))
override def merge(that: PNCounterMap[A]): PNCounterMap[A] =
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala
index d16bd79c43..5b160b08fd 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala
@@ -6,11 +6,12 @@ package akka.cluster.ddata
import akka.actor.Address
import akka.cluster.Member
import akka.cluster.UniqueAddress
+import akka.annotation.InternalApi
/**
* INTERNAL API
*/
-private[akka] object PruningState {
+@InternalApi private[akka] object PruningState {
final case class PruningInitialized(owner: UniqueAddress, seen: Set[Address]) extends PruningState {
override def addSeen(node: Address): PruningState = {
if (seen(node) || owner.address == node) this
@@ -25,7 +26,7 @@ private[akka] object PruningState {
/**
* INTERNAL API
*/
-private[akka] sealed trait PruningState {
+@InternalApi private[akka] sealed trait PruningState {
import PruningState._
def merge(that: PruningState): PruningState =
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala
index 117e9c5a43..98bd1134fd 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala
@@ -4,6 +4,8 @@
package akka.cluster.ddata
import akka.cluster.UniqueAddress
+import scala.compat.java8.OptionConverters._
+import java.util.Optional
/**
* Interface for implementing a state based convergent
@@ -52,6 +54,13 @@ trait ReplicatedData {
*/
trait DeltaReplicatedData extends ReplicatedData {
+ /**
+ * The type of the delta. To be specified by subclass.
+ * It may be the same type as `T` or a different type if needed.
+ * For example `GSet` uses the same type and `ORSet` uses a different type.
+ */
+ type D <: ReplicatedDelta
+
/**
* The accumulated delta of mutator operations since previous
* [[#resetDelta]]. When the `Replicator` invokes the `modify` function
@@ -61,7 +70,14 @@ trait DeltaReplicatedData extends ReplicatedData {
* `modify` function shall still return the full state in the same way as
* `ReplicatedData` without support for deltas.
*/
- def delta: T
+ def delta: Option[D]
+
+ /**
+ * This method is used when a received delta is merged into the full state.
+ * When the delta type `D` is the same as the full state type `T`
+ * this method can be implemented by delegating to `merge`.
+ */
+ def mergeDelta(thatDelta: D): T
/**
* Reset collection of deltas from mutator operations. When the `Replicator`
@@ -76,14 +92,36 @@ trait DeltaReplicatedData extends ReplicatedData {
}
+/**
+ * The delta type `D` of a [[DeltaReplicatedData]] must implement this trait.
+ */
+trait ReplicatedDelta extends ReplicatedData {
+ /**
+ * The empty full state. This is used when a delta is received
+ * and there is no existing full state on the receiving side. Then
+ * the delta is merged into the `zero` to create the initial full state.
+ */
+ def zero: DeltaReplicatedData
+}
+
+/**
+ * Marker that specifies that the deltas must be applied in causal order.
+ * There is some overhead in managing the causal delivery, so it should only
+ * be used for data types that need it.
+ *
+ * Note that if the full state type `T` is different from the delta type `D`
+ * it is the delta `D` that should be marked with this.
+ */
+trait RequiresCausalDeliveryOfDeltas extends ReplicatedDelta
+
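
A minimal sketch (editor's illustration with hypothetical EventLog/EventLogDelta names, not part of this patch) of a custom type that splits full state and delta and opts in to causal delivery by marking the delta type:

    import akka.cluster.ddata.{ DeltaReplicatedData, ReplicatedDelta, RequiresCausalDeliveryOfDeltas }

    // grossly simplified full state: an append-only log replicated via deltas
    final case class EventLog(events: Vector[String], d: Option[EventLogDelta] = None)
      extends DeltaReplicatedData {
      type T = EventLog
      type D = EventLogDelta

      def append(e: String): EventLog =
        EventLog(events :+ e, Some(EventLogDelta(d.map(_.events).getOrElse(Vector.empty) :+ e)))

      override def merge(that: EventLog): EventLog = EventLog((events ++ that.events).distinct)
      override def delta: Option[EventLogDelta] = d
      override def mergeDelta(thatDelta: EventLogDelta): EventLog = EventLog((events ++ thatDelta.events).distinct)
      override def resetDelta: EventLog = if (d.isEmpty) this else copy(d = None)
    }

    // the delta type carries the marker, so the Replicator applies it in causal order
    final case class EventLogDelta(events: Vector[String]) extends RequiresCausalDeliveryOfDeltas {
      type T = EventLogDelta
      override def merge(that: EventLogDelta): EventLogDelta = EventLogDelta(events ++ that.events)
      override def zero: DeltaReplicatedData = EventLog(Vector.empty)
    }
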
/**
* Java API: Interface for implementing a [[ReplicatedData]] in Java.
*
- * The type parameter `D` is a self-recursive type to be defined by the
+ * The type parameter `A` is a self-recursive type to be defined by the
* concrete implementation.
* E.g. `class TwoPhaseSet extends AbstractReplicatedData<TwoPhaseSet>`
*/
-abstract class AbstractReplicatedData[D <: AbstractReplicatedData[D]] extends ReplicatedData {
+abstract class AbstractReplicatedData[A <: AbstractReplicatedData[A]] extends ReplicatedData {
override type T = ReplicatedData
@@ -91,24 +129,57 @@ abstract class AbstractReplicatedData[D <: AbstractReplicatedData[D]] extends Re
* Delegates to [[#mergeData]], which must be implemented by subclass.
*/
final override def merge(that: ReplicatedData): ReplicatedData =
- mergeData(that.asInstanceOf[D])
+ mergeData(that.asInstanceOf[A])
/**
* Java API: Monotonic merge function.
*/
- def mergeData(that: D): D
+ def mergeData(that: A): A
}
/**
* Java API: Interface for implementing a [[DeltaReplicatedData]] in Java.
*
- * The type parameter `D` is a self-recursive type to be defined by the
+ * The type parameter `A` is a self-recursive type to be defined by the
* concrete implementation.
- * E.g. `class TwoPhaseSet extends AbstractDeltaReplicatedData<TwoPhaseSet>`
+ * E.g. `class TwoPhaseSet extends AbstractDeltaReplicatedData<TwoPhaseSet, TwoPhaseSet>`
*/
-abstract class AbstractDeltaReplicatedData[D <: AbstractDeltaReplicatedData[D]]
- extends AbstractReplicatedData[D] with DeltaReplicatedData {
+abstract class AbstractDeltaReplicatedData[A <: AbstractDeltaReplicatedData[A, B], B <: ReplicatedDelta]
+ extends AbstractReplicatedData[A] with DeltaReplicatedData {
+
+ override type D = ReplicatedDelta
+
+ /**
+ * Delegates to [[#deltaData]], which must be implemented by subclass.
+ */
+ final override def delta: Option[ReplicatedDelta] =
+ deltaData.asScala
+
+ /**
+ * The accumulated delta of mutator operations since previous
+ * [[#resetDelta]]. When the `Replicator` invokes the `modify` function
+ * of the `Update` message and the user code is invoking one or more mutator
+ * operations the data is collecting the delta of the operations and makes
+ * it available for the `Replicator` with the [[#deltaData]] accessor. The
+ * `modify` function shall still return the full state in the same way as
+ * `ReplicatedData` without support for deltas.
+ */
+ def deltaData: Optional[B]
+
+ /**
+ * Delegates to [[#mergeDeltaData]], which must be implemented by subclass.
+ */
+ final override def mergeDelta(that: ReplicatedDelta): ReplicatedData =
+ mergeDeltaData(that.asInstanceOf[B])
+
+ /**
+ * This method is used when a received delta is merged into the full state.
+ * When the delta type `D` is the same as the full state type `T`
+ * this method can be implemented by delegating to `mergeData`.
+ */
+ def mergeDeltaData(that: B): A
+
}
/**
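
For the Java-facing AbstractDeltaReplicatedData above, the Option/Optional bridging goes through scala-java8-compat, which this file already imports. A tiny sketch (editor's note, not part of this patch):

    import java.util.Optional
    import scala.compat.java8.OptionConverters._

    val javaDelta: Optional[String] = Option("delta").asJava     // Scala Option exposed to Java
    val scalaDelta: Option[String]  = Optional.of("delta").asScala
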
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
index 1870092830..0a96101ad0 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
@@ -45,6 +45,9 @@ import akka.actor.ActorInitializationException
import java.util.concurrent.TimeUnit
import akka.util.Helpers.toRootLowerCase
import akka.actor.Cancellable
+import scala.util.control.NonFatal
+import akka.cluster.ddata.Key.KeyId
+import akka.annotation.InternalApi
object ReplicatorSettings {
@@ -89,7 +92,7 @@ object ReplicatorSettings {
/**
* INTERNAL API
*/
- private[akka] def roleOption(role: String): Option[String] =
+ @InternalApi private[akka] def roleOption(role: String): Option[String] =
if (role == "") None else Option(role)
}
@@ -128,7 +131,7 @@ final class ReplicatorSettings(
val pruningInterval: FiniteDuration,
val maxPruningDissemination: FiniteDuration,
val durableStoreProps: Either[(String, Config), Props],
- val durableKeys: Set[String],
+ val durableKeys: Set[KeyId],
val pruningMarkerTimeToLive: FiniteDuration,
val durablePruningMarkerTimeToLive: FiniteDuration,
val deltaCrdtEnabled: Boolean) {
@@ -183,7 +186,7 @@ final class ReplicatorSettings(
/**
* Scala API
*/
- def withDurableKeys(durableKeys: Set[String]): ReplicatorSettings =
+ def withDurableKeys(durableKeys: Set[KeyId]): ReplicatorSettings =
copy(durableKeys = durableKeys)
/**
@@ -206,7 +209,7 @@ final class ReplicatorSettings(
pruningInterval: FiniteDuration = pruningInterval,
maxPruningDissemination: FiniteDuration = maxPruningDissemination,
durableStoreProps: Either[(String, Config), Props] = durableStoreProps,
- durableKeys: Set[String] = durableKeys,
+ durableKeys: Set[KeyId] = durableKeys,
pruningMarkerTimeToLive: FiniteDuration = pruningMarkerTimeToLive,
durablePruningMarkerTimeToLive: FiniteDuration = durablePruningMarkerTimeToLive,
deltaCrdtEnabled: Boolean = deltaCrdtEnabled): ReplicatorSettings =
@@ -270,12 +273,12 @@ object Replicator {
/**
* INTERNAL API
*/
- private[akka] case object GetKeyIds
+ @InternalApi private[akka] case object GetKeyIds
/**
* INTERNAL API
*/
- private[akka] final case class GetKeyIdsResult(keyIds: Set[String]) {
+ @InternalApi private[akka] final case class GetKeyIdsResult(keyIds: Set[KeyId]) {
/**
* Java API
*/
@@ -576,18 +579,18 @@ object Replicator {
/**
* INTERNAL API
*/
- private[akka] object Internal {
+ @InternalApi private[akka] object Internal {
case object GossipTick
case object DeltaPropagationTick
case object RemovedNodePruningTick
case object ClockTick
- final case class Write(key: String, envelope: DataEnvelope) extends ReplicatorMessage
+ final case class Write(key: KeyId, envelope: DataEnvelope) extends ReplicatorMessage
case object WriteAck extends ReplicatorMessage with DeadLetterSuppression
case object WriteNack extends ReplicatorMessage with DeadLetterSuppression
- final case class Read(key: String) extends ReplicatorMessage
+ final case class Read(key: KeyId) extends ReplicatorMessage
final case class ReadResult(envelope: Option[DataEnvelope]) extends ReplicatorMessage with DeadLetterSuppression
- final case class ReadRepair(key: String, envelope: DataEnvelope)
+ final case class ReadRepair(key: KeyId, envelope: DataEnvelope)
case object ReadRepairAck
// for testing purposes
final case class TestFullStateGossip(enabled: Boolean)
@@ -603,12 +606,24 @@ object Replicator {
* The `DataEnvelope` wraps a data entry and carries state of the pruning process for the entry.
*/
final case class DataEnvelope(
- data: ReplicatedData,
- pruning: Map[UniqueAddress, PruningState] = Map.empty)
+ data: ReplicatedData,
+ pruning: Map[UniqueAddress, PruningState] = Map.empty,
+ deltaVersions: VersionVector = VersionVector.empty)
extends ReplicatorMessage {
import PruningState._
+ def withoutDeltaVersions: DataEnvelope =
+ if (deltaVersions.isEmpty) this
+ else copy(deltaVersions = VersionVector.empty)
+
+ /**
+ * We only use the deltaVersions to track versions per node, not for ordering comparisons,
+ * so we can just remove the entry for the removed node.
+ */
+ private def cleanedDeltaVersions(from: UniqueAddress): VersionVector =
+ deltaVersions.pruningCleanup(from)
+
def needPruningFrom(removedNode: UniqueAddress): Boolean =
data match {
case r: RemovedNodePruning ⇒ r.needPruningFrom(removedNode)
@@ -616,7 +631,9 @@ object Replicator {
}
def initRemovedNodePruning(removed: UniqueAddress, owner: UniqueAddress): DataEnvelope = {
- copy(pruning = pruning.updated(removed, PruningInitialized(owner, Set.empty)))
+ copy(
+ pruning = pruning.updated(removed, PruningInitialized(owner, Set.empty)),
+ deltaVersions = cleanedDeltaVersions(removed))
}
def prune(from: UniqueAddress, pruningPerformed: PruningPerformed): DataEnvelope = {
@@ -626,7 +643,8 @@ object Replicator {
pruning(from) match {
case PruningInitialized(owner, _) ⇒
val prunedData = dataWithRemovedNodePruning.prune(from, owner)
- copy(data = prunedData, pruning = pruning.updated(from, pruningPerformed))
+ copy(data = prunedData, pruning = pruning.updated(from, pruningPerformed),
+ deltaVersions = cleanedDeltaVersions(from))
case _ ⇒
this
}
@@ -659,13 +677,36 @@ object Replicator {
}
}
- // cleanup both sides before merging, `merge((otherData: ReplicatedData)` will cleanup other.data
- copy(data = cleaned(data, filteredMergedPruning), pruning = filteredMergedPruning).merge(other.data)
+ // cleanup and merge deltaVersions
+ val removedNodes = filteredMergedPruning.keys
+ val cleanedDV = removedNodes.foldLeft(deltaVersions) { (acc, node) ⇒ acc.pruningCleanup(node) }
+ val cleanedOtherDV = removedNodes.foldLeft(other.deltaVersions) { (acc, node) ⇒ acc.pruningCleanup(node) }
+ val mergedDeltaVersions = cleanedDV.merge(cleanedOtherDV)
+
+ // cleanup both sides before merging, `merge(otherData: ReplicatedData)` will cleanup other.data
+ copy(
+ data = cleaned(data, filteredMergedPruning),
+ deltaVersions = mergedDeltaVersions,
+ pruning = filteredMergedPruning).merge(other.data)
}
- def merge(otherData: ReplicatedData): DataEnvelope =
+ def merge(otherData: ReplicatedData): DataEnvelope = {
if (otherData == DeletedData) DeletedEnvelope
- else copy(data = data merge cleaned(otherData, pruning).asInstanceOf[data.T])
+ else {
+ val mergedData =
+ cleaned(otherData, pruning) match {
+ case d: ReplicatedDelta ⇒ data match {
+ case drd: DeltaReplicatedData ⇒ drd.mergeDelta(d.asInstanceOf[drd.D])
+ case _ ⇒ throw new IllegalArgumentException("Expected DeltaReplicatedData")
+ }
+ case c ⇒ data.merge(c.asInstanceOf[data.T])
+ }
+ if (data.getClass != mergedData.getClass)
+ throw new IllegalArgumentException(
+ s"Wrong type, existing type [${data.getClass.getName}], got [${mergedData.getClass.getName}]")
+ copy(data = mergedData)
+ }
+ }
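
The dispatch in DataEnvelope.merge above boils down to the following rule (editor's sketch, directly mirroring the patch code):

    import akka.cluster.ddata.{ DeltaReplicatedData, ReplicatedData, ReplicatedDelta }

    def mergeIncoming(existing: ReplicatedData, incoming: ReplicatedData): ReplicatedData =
      incoming match {
        case d: ReplicatedDelta ⇒ existing match {
          case drd: DeltaReplicatedData ⇒ drd.mergeDelta(d.asInstanceOf[drd.D]) // delta merge
          case _                        ⇒ throw new IllegalArgumentException("Expected DeltaReplicatedData")
        }
        case other ⇒ existing.merge(other.asInstanceOf[existing.T])             // full-state merge
      }
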
private def cleaned(c: ReplicatedData, p: Map[UniqueAddress, PruningState]): ReplicatedData = p.foldLeft(c) {
case (c: RemovedNodePruning, (removed, _: PruningPerformed)) ⇒
@@ -693,15 +734,16 @@ object Replicator {
override def merge(that: ReplicatedData): ReplicatedData = DeletedData
}
- final case class Status(digests: Map[String, Digest], chunk: Int, totChunks: Int) extends ReplicatorMessage {
+ final case class Status(digests: Map[KeyId, Digest], chunk: Int, totChunks: Int) extends ReplicatorMessage {
override def toString: String =
(digests.map {
case (key, bytes) ⇒ key + " -> " + bytes.map(byte ⇒ f"$byte%02x").mkString("")
}).mkString("Status(", ", ", ")")
}
- final case class Gossip(updatedData: Map[String, DataEnvelope], sendBack: Boolean) extends ReplicatorMessage
+ final case class Gossip(updatedData: Map[KeyId, DataEnvelope], sendBack: Boolean) extends ReplicatorMessage
- final case class DeltaPropagation(deltas: Map[String, DataEnvelope]) extends ReplicatorMessage
+ final case class Delta(dataEnvelope: DataEnvelope, fromSeqNr: Long, toSeqNr: Long)
+ final case class DeltaPropagation(fromNode: UniqueAddress, deltas: Map[KeyId, Delta]) extends ReplicatorMessage
}
}
@@ -748,14 +790,13 @@ object Replicator {
* result in sending the delta {'c', 'd'} and merge that with the state on the
* receiving side, resulting in set {'a', 'b', 'c', 'd'}.
*
- * Current protocol for replicating the deltas does not support causal consistency.
- * It is only eventually consistent. This means that if elements 'c' and 'd' are
+ * The protocol for replicating the deltas supports causal consistency if the data type
+ * is marked with [[RequiresCausalDeliveryOfDeltas]]. Otherwise it is only eventually
+ * consistent. Without causal consistency this means that if elements 'c' and 'd' are
* added in two separate `Update` operations these deltas may occasionally be propagated
* to nodes in different order than the causal order of the updates. For this example it
* can result in that set {'a', 'b', 'd'} can be seen before element 'c' is seen. Eventually
- * it will be {'a', 'b', 'c', 'd'}. If causal consistency is needed the delta propagation
- * should be disabled with configuration property
- * `akka.cluster.distributed-data.delta-crdt.enabled=off`.
+ * it will be {'a', 'b', 'c', 'd'}.
*
* == Update ==
*
@@ -940,19 +981,19 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
context.system.deadLetters // not used
val deltaPropagationSelector = new DeltaPropagationSelector {
- override val divisor = 5
+ override val gossipIntervalDivisor = 5
override def allNodes: Vector[Address] = {
// TODO optimize, by maintaining a sorted instance variable instead
- nodes.union(weaklyUpNodes).toVector.sorted
+ nodes.union(weaklyUpNodes).diff(unreachable).toVector.sorted
}
- override def createDeltaPropagation(deltas: Map[String, ReplicatedData]): DeltaPropagation = {
+ override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation = {
// Important to include the pruning state in the deltas. For example if the delta is based
// on an entry that has been pruned but that has not yet been performed on the target node.
- DeltaPropagation(deltas.map {
- case (key, d) ⇒ getData(key) match {
- case Some(envelope) ⇒ key → envelope.copy(data = d)
- case None ⇒ key → DataEnvelope(d)
+ DeltaPropagation(selfUniqueAddress, deltas.map {
+ case (key, (d, fromSeqNr, toSeqNr)) ⇒ getData(key) match {
+ case Some(envelope) ⇒ key → Delta(envelope.copy(data = d), fromSeqNr, toSeqNr)
+ case None ⇒ key → Delta(DataEnvelope(d), fromSeqNr, toSeqNr)
}
}(collection.breakOut))
}
@@ -962,7 +1003,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
// Derive the deltaPropagationInterval from the gossipInterval.
// Normally the delta is propagated to all nodes within the gossip tick, so that
// full state gossip is not needed.
- val deltaPropagationInterval = (gossipInterval / deltaPropagationSelector.divisor).max(200.millis)
+ val deltaPropagationInterval = (gossipInterval / deltaPropagationSelector.gossipIntervalDivisor).max(200.millis)
Some(context.system.scheduler.schedule(deltaPropagationInterval, deltaPropagationInterval,
self, DeltaPropagationTick))
} else None
@@ -983,9 +1024,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
var unreachable = Set.empty[Address]
// the actual data
- var dataEntries = Map.empty[String, (DataEnvelope, Digest)]
+ var dataEntries = Map.empty[KeyId, (DataEnvelope, Digest)]
// keys that have changed, Changed event published to subscribers on FlushChanges
- var changed = Set.empty[String]
+ var changed = Set.empty[KeyId]
// for splitting up gossip in chunks
var statusCount = 0L
@@ -993,9 +1034,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
// possibility to disable Gossip for testing purpose
var fullStateGossipEnabled = true
- val subscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef]
- val newSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef]
- var subscriptionKeys = Map.empty[String, KeyR]
+ val subscribers = new mutable.HashMap[KeyId, mutable.Set[ActorRef]] with mutable.MultiMap[KeyId, ActorRef]
+ val newSubscribers = new mutable.HashMap[KeyId, mutable.Set[ActorRef]] with mutable.MultiMap[KeyId, ActorRef]
+ var subscriptionKeys = Map.empty[KeyId, KeyR]
// To be able to do efficient stashing we use this field instead of sender().
// Using internal buffer instead of Stash to avoid the overhead of the Stash mailbox.
@@ -1108,7 +1149,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
case Read(key) ⇒ receiveRead(key)
case Write(key, envelope) ⇒ receiveWrite(key, envelope)
case ReadRepair(key, envelope) ⇒ receiveReadRepair(key, envelope)
- case DeltaPropagation(deltas) ⇒ receiveDeltaPropagation(deltas)
+ case DeltaPropagation(from, deltas) ⇒ receiveDeltaPropagation(from, deltas)
case FlushChanges ⇒ receiveFlushChanges()
case DeltaPropagationTick ⇒ receiveDeltaPropagationTick()
case GossipTick ⇒ receiveGossipTick()
@@ -1138,9 +1179,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
log.debug("Received Get for key [{}], local data [{}]", key, localValue)
if (isLocalGet(consistency)) {
val reply = localValue match {
- case Some(DataEnvelope(DeletedData, _)) ⇒ DataDeleted(key, req)
- case Some(DataEnvelope(data, _)) ⇒ GetSuccess(key, req)(data)
- case None ⇒ NotFound(key, req)
+ case Some(DataEnvelope(DeletedData, _, _)) ⇒ DataDeleted(key, req)
+ case Some(DataEnvelope(data, _, _)) ⇒ GetSuccess(key, req)(data)
+ case None ⇒ NotFound(key, req)
}
replyTo ! reply
} else
@@ -1155,7 +1196,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
case _ ⇒ false
}
- def receiveRead(key: String): Unit = {
+ def receiveRead(key: KeyId): Unit = {
replyTo ! ReadResult(getData(key))
}
@@ -1166,23 +1207,22 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val localValue = getData(key.id)
Try {
localValue match {
- case Some(DataEnvelope(DeletedData, _)) ⇒ throw new DataDeleted(key, req)
- case Some(envelope @ DataEnvelope(existing, _)) ⇒
+ case Some(DataEnvelope(DeletedData, _, _)) ⇒ throw new DataDeleted(key, req)
+ case Some(envelope @ DataEnvelope(existing, _, _)) ⇒
modify(Some(existing)) match {
case d: DeltaReplicatedData if deltaCrdtEnabled ⇒
- (envelope.merge(d.resetDelta.asInstanceOf[existing.T]), Some(d.delta))
+ (envelope.merge(d.resetDelta.asInstanceOf[existing.T]), d.delta)
case d ⇒
(envelope.merge(d.asInstanceOf[existing.T]), None)
}
case None ⇒ modify(None) match {
- case d: DeltaReplicatedData if deltaCrdtEnabled ⇒ (DataEnvelope(d.resetDelta), Some(d.delta))
+ case d: DeltaReplicatedData if deltaCrdtEnabled ⇒ (DataEnvelope(d.resetDelta), d.delta)
case d ⇒ (DataEnvelope(d), None)
}
}
} match {
case Success((envelope, delta)) ⇒
log.debug("Received Update for key [{}], old data [{}], new data [{}], delta [{}]", key, localValue, envelope.data, delta)
- setData(key.id, envelope)
// handle the delta
delta match {
@@ -1190,23 +1230,28 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
case None ⇒ // not DeltaReplicatedData
}
+ // note that it's important to do deltaPropagationSelector.update before setData,
+ // so that the latest delta version is used
+ val newEnvelope = setData(key.id, envelope)
+
val durable = isDurable(key.id)
if (isLocalUpdate(writeConsistency)) {
if (durable)
- durableStore ! Store(key.id, new DurableDataEnvelope(envelope),
+ durableStore ! Store(key.id, new DurableDataEnvelope(newEnvelope),
Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), replyTo)))
else
replyTo ! UpdateSuccess(key, req)
} else {
val writeEnvelope = delta match {
- case Some(d) ⇒ DataEnvelope(d)
- case None ⇒ envelope
+ case Some(d: RequiresCausalDeliveryOfDeltas) ⇒ newEnvelope
+ case Some(d) ⇒ DataEnvelope(d)
+ case None ⇒ newEnvelope
}
val writeAggregator =
context.actorOf(WriteAggregator.props(key, writeEnvelope, writeConsistency, req, nodes, unreachable, replyTo, durable)
.withDispatcher(context.props.dispatcher))
if (durable) {
- durableStore ! Store(key.id, new DurableDataEnvelope(envelope),
+ durableStore ! Store(key.id, new DurableDataEnvelope(newEnvelope),
Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), writeAggregator)))
}
}
@@ -1219,7 +1264,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
}
- def isDurable(key: String): Boolean =
+ def isDurable(key: KeyId): Boolean =
durable(key) || (durableWildcards.nonEmpty && durableWildcards.exists(key.startsWith))
def isLocalUpdate(writeConsistency: WriteConsistency): Boolean =
@@ -1229,7 +1274,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
case _ ⇒ false
}
- def receiveWrite(key: String, envelope: DataEnvelope): Unit = {
+ def receiveWrite(key: KeyId, envelope: DataEnvelope): Unit = {
write(key, envelope) match {
case Some(newEnvelope) ⇒
if (isDurable(key))
@@ -1240,27 +1285,38 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
}
- def write(key: String, writeEnvelope: DataEnvelope): Option[DataEnvelope] =
+ def write(key: KeyId, writeEnvelope: DataEnvelope): Option[DataEnvelope] = {
getData(key) match {
- case Some(DataEnvelope(DeletedData, _)) ⇒ Some(DeletedEnvelope) // already deleted
- case Some(envelope @ DataEnvelope(existing, _)) ⇒
- if (existing.getClass == writeEnvelope.data.getClass || writeEnvelope.data == DeletedData) {
+ case someEnvelope @ Some(envelope) if envelope eq writeEnvelope ⇒ someEnvelope
+ case Some(DataEnvelope(DeletedData, _, _)) ⇒ Some(DeletedEnvelope) // already deleted
+ case Some(envelope @ DataEnvelope(existing, _, _)) ⇒
+ try {
+ // DataEnvelope will mergeDelta when needed
val merged = envelope.merge(writeEnvelope).addSeen(selfAddress)
- setData(key, merged)
- Some(merged)
- } else {
- log.warning(
- "Wrong type for writing [{}], existing type [{}], got [{}]",
- key, existing.getClass.getName, writeEnvelope.data.getClass.getName)
- None
+ Some(setData(key, merged))
+ } catch {
+ case e: IllegalArgumentException ⇒
+ log.warning(
+ "Couldn't merge [{}], due to: {}", key, e.getMessage)
+ None
}
case None ⇒
- val writeEnvelope2 = writeEnvelope.addSeen(selfAddress)
- setData(key, writeEnvelope2)
- Some(writeEnvelope2)
- }
+ // no existing data for the key
+ val writeEnvelope2 =
+ writeEnvelope.data match {
+ case d: ReplicatedDelta ⇒
+ val z = d.zero
+ writeEnvelope.copy(data = z.mergeDelta(d.asInstanceOf[z.D]))
+ case _ ⇒
+ writeEnvelope
+ }
- def writeAndStore(key: String, writeEnvelope: DataEnvelope): Unit = {
+ val writeEnvelope3 = writeEnvelope2.addSeen(selfAddress)
+ Some(setData(key, writeEnvelope3))
+ }
+ }
+
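
When there is no local state for the key, the branch above folds an incoming delta into its zero element to build the initial full state. A standalone sketch of that rule (editor's illustration):

    import akka.cluster.ddata.{ ReplicatedData, ReplicatedDelta }

    def initialState(incoming: ReplicatedData): ReplicatedData =
      incoming match {
        case d: ReplicatedDelta ⇒
          val z = d.zero
          z.mergeDelta(d.asInstanceOf[z.D]) // delta merged into the empty full state
        case other ⇒ other                  // already a full state
      }
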
+ def writeAndStore(key: KeyId, writeEnvelope: DataEnvelope): Unit = {
write(key, writeEnvelope) match {
case Some(newEnvelope) ⇒
if (isDurable(key))
@@ -1269,21 +1325,21 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
}
- def receiveReadRepair(key: String, writeEnvelope: DataEnvelope): Unit = {
+ def receiveReadRepair(key: KeyId, writeEnvelope: DataEnvelope): Unit = {
writeAndStore(key, writeEnvelope)
replyTo ! ReadRepairAck
}
def receiveGetKeyIds(): Unit = {
- val keys: Set[String] = dataEntries.collect {
- case (key, (DataEnvelope(data, _), _)) if data != DeletedData ⇒ key
+ val keys: Set[KeyId] = dataEntries.collect {
+ case (key, (DataEnvelope(data, _, _), _)) if data != DeletedData ⇒ key
}(collection.breakOut)
replyTo ! GetKeyIdsResult(keys)
}
def receiveDelete(key: KeyR, consistency: WriteConsistency, req: Option[Any]): Unit = {
getData(key.id) match {
- case Some(DataEnvelope(DeletedData, _)) ⇒
+ case Some(DataEnvelope(DeletedData, _, _)) ⇒
// already deleted
replyTo ! DataDeleted(key, req)
case _ ⇒
@@ -1307,23 +1363,35 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
}
- def setData(key: String, envelope: DataEnvelope): Unit = {
+ def setData(key: KeyId, envelope: DataEnvelope): DataEnvelope = {
+ val newEnvelope = {
+ if (deltaCrdtEnabled) {
+ val deltaVersions = envelope.deltaVersions
+ val currVersion = deltaPropagationSelector.currentVersion(key)
+ if (currVersion == 0L || currVersion == deltaVersions.versionAt(selfUniqueAddress))
+ envelope
+ else
+ envelope.copy(deltaVersions = deltaVersions.merge(VersionVector(selfUniqueAddress, currVersion)))
+ } else envelope
+ }
+
val dig =
if (subscribers.contains(key) && !changed.contains(key)) {
val oldDigest = getDigest(key)
- val dig = digest(envelope)
+ val dig = digest(newEnvelope)
if (dig != oldDigest)
changed += key // notify subscribers, later
dig
- } else if (envelope.data == DeletedData) DeletedDigest
+ } else if (newEnvelope.data == DeletedData) DeletedDigest
else LazyDigest
- dataEntries = dataEntries.updated(key, (envelope, dig))
- if (envelope.data == DeletedData)
+ dataEntries = dataEntries.updated(key, (newEnvelope, dig))
+ if (newEnvelope.data == DeletedData)
deltaPropagationSelector.delete(key)
+ newEnvelope
}
- def getDigest(key: String): Digest = {
+ def getDigest(key: KeyId): Digest = {
dataEntries.get(key) match {
case Some((envelope, LazyDigest)) ⇒
val d = digest(envelope)
@@ -1337,14 +1405,27 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def digest(envelope: DataEnvelope): Digest =
if (envelope.data == DeletedData) DeletedDigest
else {
- val bytes = serializer.toBinary(envelope)
+ val bytes = serializer.toBinary(envelope.withoutDeltaVersions)
ByteString.fromArray(MessageDigest.getInstance("SHA-1").digest(bytes))
}
- def getData(key: String): Option[DataEnvelope] = dataEntries.get(key).map { case (envelope, _) ⇒ envelope }
+ def getData(key: KeyId): Option[DataEnvelope] = dataEntries.get(key).map { case (envelope, _) ⇒ envelope }
+
+ def getDeltaSeqNr(key: KeyId, fromNode: UniqueAddress): Long =
+ dataEntries.get(key) match {
+ case Some((DataEnvelope(_, _, deltaVersions), _)) ⇒ deltaVersions.versionAt(fromNode)
+ case None ⇒ 0L
+ }
+
+ def isNodeRemoved(node: UniqueAddress, keys: Iterable[KeyId]): Boolean = {
+ removedNodes.contains(node) || (keys.exists(key ⇒ dataEntries.get(key) match {
+ case Some((DataEnvelope(_, pruning, _), _)) ⇒ pruning.contains(node)
+ case None ⇒ false
+ }))
+ }
def receiveFlushChanges(): Unit = {
- def notify(keyId: String, subs: mutable.Set[ActorRef]): Unit = {
+ def notify(keyId: KeyId, subs: mutable.Set[ActorRef]): Unit = {
val key = subscriptionKeys(keyId)
getData(keyId) match {
case Some(envelope) ⇒
@@ -1369,7 +1450,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
newSubscribers.clear()
}
- changed = Set.empty[String]
+ changed = Set.empty[KeyId]
}
def receiveDeltaPropagationTick(): Unit = {
@@ -1378,17 +1459,55 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
// TODO split it to several DeltaPropagation if too many entries
replica(node) ! deltaPropagation
}
- if (deltaPropagationSelector.propagationCount % deltaPropagationSelector.divisor == 0)
+
+ if (deltaPropagationSelector.propagationCount % deltaPropagationSelector.gossipIntervalDivisor == 0)
deltaPropagationSelector.cleanupDeltaEntries()
}
- def receiveDeltaPropagation(deltas: Map[String, DataEnvelope]): Unit = {
- if (log.isDebugEnabled)
- log.debug("Received DeltaPropagation from [{}], containing [{}]", sender().path.address, deltas.keys.mkString(", "))
- deltas.foreach {
- case (key, envelope) ⇒ writeAndStore(key, envelope)
+ def receiveDeltaPropagation(fromNode: UniqueAddress, deltas: Map[KeyId, Delta]): Unit =
+ if (deltaCrdtEnabled) {
+ try {
+ val isDebugEnabled = log.isDebugEnabled
+ if (isDebugEnabled)
+ log.debug("Received DeltaPropagation from [{}], containing [{}]", fromNode.address,
+ deltas.collect { case (key, Delta(_, fromSeqNr, toSeqNr)) ⇒ s"$key $fromSeqNr-$toSeqNr" }.mkString(", "))
+
+ if (isNodeRemoved(fromNode, deltas.keys)) {
+ // Late message from a removed node.
+ // Drop it to avoid merging deltas that have been pruned on one side.
+ if (isDebugEnabled) log.debug(
+ "Skipping DeltaPropagation from [{}] because that node has been removed", fromNode.address)
+ } else {
+ deltas.foreach {
+ case (key, Delta(envelope @ DataEnvelope(_: RequiresCausalDeliveryOfDeltas, _, _), fromSeqNr, toSeqNr)) ⇒
+ val currentSeqNr = getDeltaSeqNr(key, fromNode)
+ if (currentSeqNr >= toSeqNr) {
+ if (isDebugEnabled) log.debug(
+ "Skipping DeltaPropagation from [{}] for [{}] because toSeqNr [{}] already handled [{}]",
+ fromNode.address, key, toSeqNr, currentSeqNr)
+ } else if (fromSeqNr > (currentSeqNr + 1)) {
+ if (isDebugEnabled) log.debug(
+ "Skipping DeltaPropagation from [{}] for [{}] because missing deltas between [{}-{}]",
+ fromNode.address, key, currentSeqNr + 1, fromSeqNr - 1)
+ } else {
+ if (isDebugEnabled) log.debug(
+ "Applying DeltaPropagation from [{}] for [{}] with sequence numbers [{}], current was [{}]",
+ fromNode.address, key, s"$fromSeqNr-$toSeqNr", currentSeqNr)
+ val newEnvelope = envelope.copy(deltaVersions = VersionVector(fromNode, toSeqNr))
+ writeAndStore(key, newEnvelope)
+ }
+ case (key, Delta(envelope, _, _)) ⇒
+ // causal delivery of deltas not needed, just apply it
+ writeAndStore(key, envelope)
+ }
+ }
+ } catch {
+ case NonFatal(e) ⇒
+ // catching in case we need to support rolling upgrades that are
+ // mixing nodes with incompatible delta-CRDT types
+ log.warning("Couldn't process DeltaPropagation from [] due to {}", fromNode, e)
+ }
}
- }
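
The gating used above for deltas that require causal delivery can be summarized as follows (editor's sketch of the same conditions):

    // apply a Delta(fromSeqNr, toSeqNr) only if it directly extends what has already
    // been seen from that node for the key
    def shouldApply(currentSeqNr: Long, fromSeqNr: Long, toSeqNr: Long): Boolean =
      toSeqNr > currentSeqNr && fromSeqNr <= currentSeqNr + 1

    // e.g. with currentSeqNr = 3: a delta covering 4-6 is applied, 2-3 is skipped as
    // already handled, and 5-6 is skipped because delta 4 is still missing
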
def receiveGossipTick(): Unit = {
if (fullStateGossipEnabled)
@@ -1424,12 +1543,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def replica(address: Address): ActorSelection =
context.actorSelection(self.path.toStringWithAddress(address))
- def receiveStatus(otherDigests: Map[String, Digest], chunk: Int, totChunks: Int): Unit = {
+ def receiveStatus(otherDigests: Map[KeyId, Digest], chunk: Int, totChunks: Int): Unit = {
if (log.isDebugEnabled)
log.debug("Received gossip status from [{}], chunk [{}] of [{}] containing [{}]", replyTo.path.address,
(chunk + 1), totChunks, otherDigests.keys.mkString(", "))
- def isOtherDifferent(key: String, otherDigest: Digest): Boolean = {
+ def isOtherDifferent(key: KeyId, otherDigest: Digest): Boolean = {
val d = getDigest(key)
d != NotFoundDigest && d != otherDigest
}
@@ -1457,10 +1576,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
}
- def receiveGossip(updatedData: Map[String, DataEnvelope], sendBack: Boolean): Unit = {
+ def receiveGossip(updatedData: Map[KeyId, DataEnvelope], sendBack: Boolean): Unit = {
if (log.isDebugEnabled)
log.debug("Received gossip from [{}], containing [{}]", replyTo.path.address, updatedData.keys.mkString(", "))
- var replyData = Map.empty[String, DataEnvelope]
+ var replyData = Map.empty[KeyId, DataEnvelope]
updatedData.foreach {
case (key, envelope) ⇒
val hadData = dataEntries.contains(key)
@@ -1568,7 +1687,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val knownNodes = nodes union weaklyUpNodes union removedNodes.keySet.map(_.address)
val newRemovedNodes =
dataEntries.foldLeft(Set.empty[UniqueAddress]) {
- case (acc, (_, (envelope @ DataEnvelope(data: RemovedNodePruning, _), _))) ⇒
+ case (acc, (_, (envelope @ DataEnvelope(data: RemovedNodePruning, _, _), _))) ⇒
acc union data.modifiedByNodes.filterNot(n ⇒ n == selfUniqueAddress || knownNodes(n.address))
case (acc, _) ⇒
acc
@@ -1616,7 +1735,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val pruningPerformed = PruningPerformed(System.currentTimeMillis() + pruningMarkerTimeToLive.toMillis)
val durablePruningPerformed = PruningPerformed(System.currentTimeMillis() + durablePruningMarkerTimeToLive.toMillis)
dataEntries.foreach {
- case (key, (envelope @ DataEnvelope(data: RemovedNodePruning, pruning), _)) ⇒
+ case (key, (envelope @ DataEnvelope(data: RemovedNodePruning, pruning, _), _)) ⇒
pruning.foreach {
case (removed, PruningInitialized(owner, seen)) if owner == selfUniqueAddress
&& (allNodes.isEmpty || allNodes.forall(seen)) ⇒
@@ -1634,7 +1753,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def deleteObsoletePruningPerformed(): Unit = {
val currentTime = System.currentTimeMillis()
dataEntries.foreach {
- case (key, (envelope @ DataEnvelope(_: RemovedNodePruning, pruning), _)) ⇒
+ case (key, (envelope @ DataEnvelope(_: RemovedNodePruning, pruning, _), _)) ⇒
val newEnvelope = pruning.foldLeft(envelope) {
case (acc, (removed, p: PruningPerformed)) if p.isObsolete(currentTime) ⇒
log.debug("Removing obsolete pruning marker for [{}] in [{}]", removed, key)
@@ -1660,7 +1779,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
/**
* INTERNAL API
*/
-private[akka] object ReadWriteAggregator {
+@InternalApi private[akka] object ReadWriteAggregator {
case object SendToSecondary
val MaxSecondaryNodes = 10
@@ -1678,7 +1797,7 @@ private[akka] object ReadWriteAggregator {
/**
* INTERNAL API
*/
-private[akka] abstract class ReadWriteAggregator extends Actor {
+@InternalApi private[akka] abstract class ReadWriteAggregator extends Actor {
import ReadWriteAggregator._
def timeout: FiniteDuration
@@ -1719,7 +1838,7 @@ private[akka] abstract class ReadWriteAggregator extends Actor {
/**
* INTERNAL API
*/
-private[akka] object WriteAggregator {
+@InternalApi private[akka] object WriteAggregator {
def props(
key: KeyR,
envelope: Replicator.Internal.DataEnvelope,
@@ -1736,7 +1855,7 @@ private[akka] object WriteAggregator {
/**
* INTERNAL API
*/
-private[akka] class WriteAggregator(
+@InternalApi private[akka] class WriteAggregator(
key: KeyR,
envelope: Replicator.Internal.DataEnvelope,
consistency: Replicator.WriteConsistency,
@@ -1826,7 +1945,7 @@ private[akka] class WriteAggregator(
/**
* INTERNAL API
*/
-private[akka] object ReadAggregator {
+@InternalApi private[akka] object ReadAggregator {
def props(
key: KeyR,
consistency: Replicator.ReadConsistency,
@@ -1843,7 +1962,7 @@ private[akka] object ReadAggregator {
/**
* INTERNAL API
*/
-private[akka] class ReadAggregator(
+@InternalApi private[akka] class ReadAggregator(
key: KeyR,
consistency: Replicator.ReadConsistency,
req: Option[Any],
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
index e7b126047a..1fc98d8a92 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
@@ -9,6 +9,7 @@ import scala.collection.immutable.TreeMap
import akka.cluster.Cluster
import akka.cluster.UniqueAddress
import akka.cluster.UniqueAddress
+import akka.annotation.InternalApi
/**
* VersionVector module with helper classes and methods.
@@ -28,7 +29,7 @@ object VersionVector {
def apply(node: UniqueAddress, version: Long): VersionVector = OneVersionVector(node, version)
/** INTERNAL API */
- private[akka] def apply(versions: List[(UniqueAddress, Long)]): VersionVector =
+ @InternalApi private[akka] def apply(versions: List[(UniqueAddress, Long)]): VersionVector =
if (versions.isEmpty) empty
else if (versions.tail.isEmpty) apply(versions.head._1, versions.head._2)
else apply(emptyVersions ++ versions)
@@ -69,7 +70,7 @@ object VersionVector {
def ConcurrentInstance = Concurrent
/** INTERNAL API */
- private[akka] object Timestamp {
+ @InternalApi private[akka] object Timestamp {
final val Zero = 0L
final val EndMarker = Long.MinValue
val counter = new AtomicLong(1L)
@@ -111,7 +112,7 @@ sealed abstract class VersionVector
* INTERNAL API
* Increment the version for the node passed as argument. Returns a new VersionVector.
*/
- private[akka] def +(node: UniqueAddress): VersionVector = increment(node)
+ @InternalApi private[akka] def +(node: UniqueAddress): VersionVector = increment(node)
/**
* Increment the version for the node passed as argument. Returns a new VersionVector.
@@ -123,23 +124,23 @@ sealed abstract class VersionVector
/**
* INTERNAL API
*/
- private[akka] def size: Int
+ @InternalApi private[akka] def size: Int
/**
* INTERNAL API
* Increment the version for the node passed as argument. Returns a new VersionVector.
*/
- private[akka] def increment(node: UniqueAddress): VersionVector
+ @InternalApi private[akka] def increment(node: UniqueAddress): VersionVector
/**
* INTERNAL API
*/
- private[akka] def versionAt(node: UniqueAddress): Long
+ @InternalApi private[akka] def versionAt(node: UniqueAddress): Long
/**
* INTERNAL API
*/
- private[akka] def contains(node: UniqueAddress): Boolean
+ @InternalApi private[akka] def contains(node: UniqueAddress): Boolean
/**
* Returns true if this and that are concurrent else false.
@@ -221,7 +222,7 @@ sealed abstract class VersionVector
/**
* INTERNAL API
*/
- private[akka] def versionsIterator: Iterator[(UniqueAddress, Long)]
+ @InternalApi private[akka] def versionsIterator: Iterator[(UniqueAddress, Long)]
/**
* Compare two version vectors. The outcome will be one of the following:
@@ -256,26 +257,26 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
override def isEmpty: Boolean = false
/** INTERNAL API */
- private[akka] override def size: Int = 1
+ @InternalApi private[akka] override def size: Int = 1
/** INTERNAL API */
- private[akka] override def increment(n: UniqueAddress): VersionVector = {
+ @InternalApi private[akka] override def increment(n: UniqueAddress): VersionVector = {
val v = Timestamp.counter.getAndIncrement()
if (n == node) copy(version = v)
else ManyVersionVector(TreeMap(node → version, n → v))
}
/** INTERNAL API */
- private[akka] override def versionAt(n: UniqueAddress): Long =
+ @InternalApi private[akka] override def versionAt(n: UniqueAddress): Long =
if (n == node) version
else Timestamp.Zero
/** INTERNAL API */
- private[akka] override def contains(n: UniqueAddress): Boolean =
+ @InternalApi private[akka] override def contains(n: UniqueAddress): Boolean =
n == node
/** INTERNAL API */
- private[akka] override def versionsIterator: Iterator[(UniqueAddress, Long)] =
+ @InternalApi private[akka] override def versionsIterator: Iterator[(UniqueAddress, Long)] =
Iterator.single((node, version))
override def merge(that: VersionVector): VersionVector = {
@@ -315,30 +316,32 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten
override def isEmpty: Boolean = versions.isEmpty
/** INTERNAL API */
- private[akka] override def size: Int = versions.size
+ @InternalApi private[akka] override def size: Int = versions.size
/** INTERNAL API */
- private[akka] override def increment(node: UniqueAddress): VersionVector = {
+ @InternalApi private[akka] override def increment(node: UniqueAddress): VersionVector = {
val v = Timestamp.counter.getAndIncrement()
VersionVector(versions.updated(node, v))
}
/** INTERNAL API */
- private[akka] override def versionAt(node: UniqueAddress): Long = versions.get(node) match {
+ @InternalApi private[akka] override def versionAt(node: UniqueAddress): Long = versions.get(node) match {
case Some(v) ⇒ v
case None ⇒ Timestamp.Zero
}
/** INTERNAL API */
- private[akka] override def contains(node: UniqueAddress): Boolean =
+ @InternalApi private[akka] override def contains(node: UniqueAddress): Boolean =
versions.contains(node)
/** INTERNAL API */
- private[akka] override def versionsIterator: Iterator[(UniqueAddress, Long)] =
+ @InternalApi private[akka] override def versionsIterator: Iterator[(UniqueAddress, Long)] =
versions.iterator
override def merge(that: VersionVector): VersionVector = {
- that match {
+ if (that.isEmpty) this
+ else if (this.isEmpty) that
+ else that match {
case ManyVersionVector(vs2) ⇒
var mergedVersions = vs2
for ((node, time) ← versions) {
@@ -366,7 +369,8 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten
VersionVector(versions = versions - removedNode) + collapseInto
override def pruningCleanup(removedNode: UniqueAddress): VersionVector =
- VersionVector(versions = versions - removedNode)
+ if (versions.contains(removedNode)) VersionVector(versions = versions - removedNode)
+ else this
override def toString: String =
versions.map { case ((n, v)) ⇒ n + " -> " + v }.mkString("VersionVector(", ", ", ")")
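
With the fast paths added to merge above, VersionVector.empty acts as a cheap identity element, so vectors can be folded together without special-casing (editor's sketch):

    import akka.cluster.ddata.VersionVector

    def mergeAll(vectors: Iterable[VersionVector]): VersionVector =
      vectors.foldLeft(VersionVector.empty)(_ merge _)
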
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
index b6bb56f8d7..d65c372c26 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
@@ -25,6 +25,7 @@ import scala.collection.immutable.TreeMap
import akka.cluster.UniqueAddress
import java.io.NotSerializableException
import akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage
+import akka.cluster.ddata.ORSet.DeltaOp
private object ReplicatedDataSerializer {
/*
@@ -163,6 +164,10 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private val GSetKeyManifest = "b"
private val ORSetManifest = "C"
private val ORSetKeyManifest = "c"
+ private val ORSetAddManifest = "Ca"
+ private val ORSetRemoveManifest = "Cr"
+ private val ORSetFullManifest = "Cf"
+ private val ORSetDeltaGroupManifest = "Cg"
private val FlagManifest = "D"
private val FlagKeyManifest = "d"
private val LWWRegisterManifest = "E"
@@ -184,6 +189,10 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
GSetManifest → gsetFromBinary,
ORSetManifest → orsetFromBinary,
+ ORSetAddManifest → orsetAddFromBinary,
+ ORSetRemoveManifest → orsetRemoveFromBinary,
+ ORSetFullManifest → orsetFullFromBinary,
+ ORSetDeltaGroupManifest → orsetDeltaGroupFromBinary,
FlagManifest → flagFromBinary,
LWWRegisterManifest → lwwRegisterFromBinary,
GCounterManifest → gcounterFromBinary,
@@ -207,48 +216,57 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
ORMultiMapKeyManifest → (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes))))
override def manifest(obj: AnyRef): String = obj match {
- case _: ORSet[_] ⇒ ORSetManifest
- case _: GSet[_] ⇒ GSetManifest
- case _: GCounter ⇒ GCounterManifest
- case _: PNCounter ⇒ PNCounterManifest
- case _: Flag ⇒ FlagManifest
- case _: LWWRegister[_] ⇒ LWWRegisterManifest
- case _: ORMap[_, _] ⇒ ORMapManifest
- case _: LWWMap[_, _] ⇒ LWWMapManifest
- case _: PNCounterMap[_] ⇒ PNCounterMapManifest
- case _: ORMultiMap[_, _] ⇒ ORMultiMapManifest
- case DeletedData ⇒ DeletedDataManifest
- case _: VersionVector ⇒ VersionVectorManifest
+ case _: ORSet[_] ⇒ ORSetManifest
+ case _: ORSet.AddDeltaOp[_] ⇒ ORSetAddManifest
+ case _: ORSet.RemoveDeltaOp[_] ⇒ ORSetRemoveManifest
+ case _: GSet[_] ⇒ GSetManifest
+ case _: GCounter ⇒ GCounterManifest
+ case _: PNCounter ⇒ PNCounterManifest
+ case _: Flag ⇒ FlagManifest
+ case _: LWWRegister[_] ⇒ LWWRegisterManifest
+ case _: ORMap[_, _] ⇒ ORMapManifest
+ case _: LWWMap[_, _] ⇒ LWWMapManifest
+ case _: PNCounterMap[_] ⇒ PNCounterMapManifest
+ case _: ORMultiMap[_, _] ⇒ ORMultiMapManifest
+ case DeletedData ⇒ DeletedDataManifest
+ case _: VersionVector ⇒ VersionVectorManifest
- case _: ORSetKey[_] ⇒ ORSetKeyManifest
- case _: GSetKey[_] ⇒ GSetKeyManifest
- case _: GCounterKey ⇒ GCounterKeyManifest
- case _: PNCounterKey ⇒ PNCounterKeyManifest
- case _: FlagKey ⇒ FlagKeyManifest
- case _: LWWRegisterKey[_] ⇒ LWWRegisterKeyManifest
- case _: ORMapKey[_, _] ⇒ ORMapKeyManifest
- case _: LWWMapKey[_, _] ⇒ LWWMapKeyManifest
- case _: PNCounterMapKey[_] ⇒ PNCounterMapKeyManifest
- case _: ORMultiMapKey[_, _] ⇒ ORMultiMapKeyManifest
+ case _: ORSetKey[_] ⇒ ORSetKeyManifest
+ case _: GSetKey[_] ⇒ GSetKeyManifest
+ case _: GCounterKey ⇒ GCounterKeyManifest
+ case _: PNCounterKey ⇒ PNCounterKeyManifest
+ case _: FlagKey ⇒ FlagKeyManifest
+ case _: LWWRegisterKey[_] ⇒ LWWRegisterKeyManifest
+ case _: ORMapKey[_, _] ⇒ ORMapKeyManifest
+ case _: LWWMapKey[_, _] ⇒ LWWMapKeyManifest
+ case _: PNCounterMapKey[_] ⇒ PNCounterMapKeyManifest
+ case _: ORMultiMapKey[_, _] ⇒ ORMultiMapKeyManifest
+
+ case _: ORSet.DeltaGroup[_] ⇒ ORSetDeltaGroupManifest
+ case _: ORSet.FullStateDeltaOp[_] ⇒ ORSetFullManifest
case _ ⇒
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
def toBinary(obj: AnyRef): Array[Byte] = obj match {
- case m: ORSet[_] ⇒ compress(orsetToProto(m))
- case m: GSet[_] ⇒ gsetToProto(m).toByteArray
- case m: GCounter ⇒ gcounterToProto(m).toByteArray
- case m: PNCounter ⇒ pncounterToProto(m).toByteArray
- case m: Flag ⇒ flagToProto(m).toByteArray
- case m: LWWRegister[_] ⇒ lwwRegisterToProto(m).toByteArray
- case m: ORMap[_, _] ⇒ compress(ormapToProto(m))
- case m: LWWMap[_, _] ⇒ compress(lwwmapToProto(m))
- case m: PNCounterMap[_] ⇒ compress(pncountermapToProto(m))
- case m: ORMultiMap[_, _] ⇒ compress(multimapToProto(m))
- case DeletedData ⇒ dm.Empty.getDefaultInstance.toByteArray
- case m: VersionVector ⇒ versionVectorToProto(m).toByteArray
- case Key(id) ⇒ keyIdToBinary(id)
+ case m: ORSet[_] ⇒ compress(orsetToProto(m))
+ case m: ORSet.AddDeltaOp[_] ⇒ orsetToProto(m.underlying).toByteArray
+ case m: ORSet.RemoveDeltaOp[_] ⇒ orsetToProto(m.underlying).toByteArray
+ case m: GSet[_] ⇒ gsetToProto(m).toByteArray
+ case m: GCounter ⇒ gcounterToProto(m).toByteArray
+ case m: PNCounter ⇒ pncounterToProto(m).toByteArray
+ case m: Flag ⇒ flagToProto(m).toByteArray
+ case m: LWWRegister[_] ⇒ lwwRegisterToProto(m).toByteArray
+ case m: ORMap[_, _] ⇒ compress(ormapToProto(m))
+ case m: LWWMap[_, _] ⇒ compress(lwwmapToProto(m))
+ case m: PNCounterMap[_] ⇒ compress(pncountermapToProto(m))
+ case m: ORMultiMap[_, _] ⇒ compress(multimapToProto(m))
+ case DeletedData ⇒ dm.Empty.getDefaultInstance.toByteArray
+ case m: VersionVector ⇒ versionVectorToProto(m).toByteArray
+ case Key(id) ⇒ keyIdToBinary(id)
+ case m: ORSet.DeltaGroup[_] ⇒ orsetDeltaGroupToProto(m).toByteArray
+ case m: ORSet.FullStateDeltaOp[_] ⇒ orsetToProto(m.underlying).toByteArray
case _ ⇒
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
@@ -362,6 +380,52 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def orsetFromBinary(bytes: Array[Byte]): ORSet[Any] =
orsetFromProto(rd.ORSet.parseFrom(decompress(bytes)))
+ private def orsetAddFromBinary(bytes: Array[Byte]): ORSet.AddDeltaOp[Any] =
+ new ORSet.AddDeltaOp(orsetFromProto(rd.ORSet.parseFrom(bytes)))
+
+ private def orsetRemoveFromBinary(bytes: Array[Byte]): ORSet.RemoveDeltaOp[Any] =
+ new ORSet.RemoveDeltaOp(orsetFromProto(rd.ORSet.parseFrom(bytes)))
+
+ private def orsetFullFromBinary(bytes: Array[Byte]): ORSet.FullStateDeltaOp[Any] =
+ new ORSet.FullStateDeltaOp(orsetFromProto(rd.ORSet.parseFrom(bytes)))
+
+ private def orsetDeltaGroupToProto(deltaGroup: ORSet.DeltaGroup[_]): rd.ORSetDeltaGroup = {
+ def createEntry(opType: rd.ORSetDeltaOp, u: ORSet[_]) = {
+ rd.ORSetDeltaGroup.Entry.newBuilder()
+ .setOperation(opType)
+ .setUnderlying(orsetToProto(u))
+ }
+
+ val b = rd.ORSetDeltaGroup.newBuilder()
+ deltaGroup.ops.foreach {
+ case ORSet.AddDeltaOp(u) ⇒
+ b.addEntries(createEntry(rd.ORSetDeltaOp.Add, u))
+ case ORSet.RemoveDeltaOp(u) ⇒
+ b.addEntries(createEntry(rd.ORSetDeltaOp.Remove, u))
+ case ORSet.FullStateDeltaOp(u) ⇒
+ b.addEntries(createEntry(rd.ORSetDeltaOp.Full, u))
+ case ORSet.DeltaGroup(u) ⇒
+ throw new IllegalArgumentException("ORSet.DeltaGroup should not be nested")
+ }
+ b.build()
+ }
+
+ private def orsetDeltaGroupFromBinary(bytes: Array[Byte]): ORSet.DeltaGroup[Any] = {
+ val deltaGroup = rd.ORSetDeltaGroup.parseFrom(bytes)
+ val ops: Vector[ORSet.DeltaOp] =
+ deltaGroup.getEntriesList.asScala.map { entry ⇒
+ if (entry.getOperation == rd.ORSetDeltaOp.Add)
+ ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying))
+ else if (entry.getOperation == rd.ORSetDeltaOp.Remove)
+ ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying))
+ else if (entry.getOperation == rd.ORSetDeltaOp.Full)
+ ORSet.FullStateDeltaOp(orsetFromProto(entry.getUnderlying))
+ else
+ throw new NotSerializableException(s"Unknown ORSet delta operation ${entry.getOperation}")
+ }(collection.breakOut)
+ ORSet.DeltaGroup(ops)
+ }
+
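
A hypothetical usage sketch (editor's note, not part of this patch) of the new delta manifests: the existing serializer can round-trip an ORSet.DeltaOp; in practice the serializer is resolved via Akka's SerializationExtension rather than constructed directly.

    import akka.actor.ExtendedActorSystem
    import akka.cluster.ddata.ORSet
    import akka.cluster.ddata.protobuf.ReplicatedDataSerializer

    def roundTrip(system: ExtendedActorSystem, delta: ORSet.DeltaOp): AnyRef = {
      val serializer = new ReplicatedDataSerializer(system)
      serializer.fromBinary(serializer.toBinary(delta), serializer.manifest(delta))
    }
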
def orsetFromProto(orset: rd.ORSet): ORSet[Any] = {
val elements: Iterator[Any] =
(orset.getStringElementsList.iterator.asScala ++
@@ -432,31 +496,6 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
decrements = gcounterFromProto(pncounter.getDecrements))
}
- def versionVectorToProto(versionVector: VersionVector): rd.VersionVector = {
- val b = rd.VersionVector.newBuilder()
- versionVector.versionsIterator.foreach {
- case (node, value) ⇒ b.addEntries(rd.VersionVector.Entry.newBuilder().
- setNode(uniqueAddressToProto(node)).setVersion(value))
- }
- b.build()
- }
-
- def versionVectorFromBinary(bytes: Array[Byte]): VersionVector =
- versionVectorFromProto(rd.VersionVector.parseFrom(bytes))
-
- def versionVectorFromProto(versionVector: rd.VersionVector): VersionVector = {
- val entries = versionVector.getEntriesList
- if (entries.isEmpty)
- VersionVector.empty
- else if (entries.size == 1)
- VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion)
- else {
- val versions: TreeMap[UniqueAddress, Long] = versionVector.getEntriesList.asScala.map(entry ⇒
- uniqueAddressFromProto(entry.getNode) → entry.getVersion)(breakOut)
- VersionVector(versions)
- }
- }
-
/*
* Convert a Map[A, B] to an Iterable[Entry] where Entry is the protobuf map entry.
*/
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
index 93e1202d29..c48b8f4750 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
@@ -26,14 +26,15 @@ import java.util.concurrent.atomic.AtomicInteger
import scala.annotation.tailrec
import scala.concurrent.duration.FiniteDuration
import akka.cluster.ddata.DurableStore.DurableDataEnvelope
-import akka.cluster.ddata.DurableStore.DurableDataEnvelope
import java.io.NotSerializableException
import akka.actor.Address
+import akka.cluster.ddata.VersionVector
+import akka.annotation.InternalApi
/**
* INTERNAL API
*/
-private[akka] object ReplicatorMessageSerializer {
+@InternalApi private[akka] object ReplicatorMessageSerializer {
/**
* A cache that is designed for a small number (<= 32) of
@@ -287,11 +288,16 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def deltaPropagationToProto(deltaPropagation: DeltaPropagation): dm.DeltaPropagation = {
val b = dm.DeltaPropagation.newBuilder()
+ .setFromNode(uniqueAddressToProto(deltaPropagation.fromNode))
val entries = deltaPropagation.deltas.foreach {
- case (key, data) ⇒
- b.addEntries(dm.DeltaPropagation.Entry.newBuilder().
- setKey(key).
- setEnvelope(dataEnvelopeToProto(data)))
+ case (key, Delta(data, fromSeqNr, toSeqNr)) ⇒
+ val b2 = dm.DeltaPropagation.Entry.newBuilder()
+ .setKey(key)
+ .setEnvelope(dataEnvelopeToProto(data))
+ .setFromSeqNr(fromSeqNr)
+ if (toSeqNr != fromSeqNr)
+ b2.setToSeqNr(toSeqNr)
+ b.addEntries(b2)
}
b.build()
}
@@ -299,8 +305,12 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def deltaPropagationFromBinary(bytes: Array[Byte]): DeltaPropagation = {
val deltaPropagation = dm.DeltaPropagation.parseFrom(bytes)
DeltaPropagation(
- deltaPropagation.getEntriesList.asScala.map(e ⇒
- e.getKey → dataEnvelopeFromProto(e.getEnvelope))(breakOut))
+ uniqueAddressFromProto(deltaPropagation.getFromNode),
+ deltaPropagation.getEntriesList.asScala.map { e ⇒
+ val fromSeqNr = e.getFromSeqNr
+ val toSeqNr = if (e.hasToSeqNr) e.getToSeqNr else fromSeqNr
+ e.getKey → Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr)
+ }(breakOut))
}
private def getToProto(get: Get[_]): dm.Get = {
@@ -434,6 +444,10 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
}
dataEnvelopeBuilder.addPruning(b)
}
+
+ if (!dataEnvelope.deltaVersions.isEmpty)
+ dataEnvelopeBuilder.setDeltaVersions(versionVectorToProto(dataEnvelope.deltaVersions))
+
dataEnvelopeBuilder.build()
}
@@ -443,7 +457,10 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def dataEnvelopeFromProto(dataEnvelope: dm.DataEnvelope): DataEnvelope = {
val data = otherMessageFromProto(dataEnvelope.getData).asInstanceOf[ReplicatedData]
val pruning = pruningFromProto(dataEnvelope.getPruningList)
- DataEnvelope(data, pruning)
+ val deltaVersions =
+ if (dataEnvelope.hasDeltaVersions) versionVectorFromProto(dataEnvelope.getDeltaVersions)
+ else VersionVector.empty
+ DataEnvelope(data, pruning, deltaVersions)
}
private def pruningFromProto(pruningEntries: java.util.List[dm.DataEnvelope.PruningEntry]): Map[UniqueAddress, PruningState] = {
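
A short sketch of the Delta / DeltaPropagation model that the two methods above encode and
decode, in the spirit of the ReplicatorMessageSerializerSpec changes further down.
Replicator.Internal is internal API (accessible from the akka.cluster.ddata package) and
selfUniqueAddress is an assumed value:

    import akka.cluster.ddata.GSet
    import akka.cluster.ddata.Replicator.Internal.{ DataEnvelope, Delta, DeltaPropagation }

    // single update: fromSeqNr == toSeqNr, so deltaPropagationToProto leaves the optional
    // toSeqNr field unset and deltaPropagationFromBinary defaults it back to fromSeqNr
    val single = Delta(DataEnvelope(GSet.empty[String] + "a"), 1L, 1L)

    // several updates folded into one delta: the covered range 3..5 is carried explicitly
    val range = Delta(DataEnvelope(GSet.empty[String] + "b"), 3L, 5L)

    val propagation = DeltaPropagation(selfUniqueAddress, Map("A" → single, "B" → range))
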
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala
index b89a9ebcb3..e2995d44cd 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala
@@ -8,6 +8,9 @@ import java.io.ByteArrayOutputStream
import java.util.zip.GZIPInputStream
import java.util.zip.GZIPOutputStream
import scala.annotation.tailrec
+import scala.collection.immutable.TreeMap
+import scala.collection.JavaConverters._
+import scala.collection.breakOut
import akka.actor.ActorRef
import akka.actor.Address
import akka.actor.ExtendedActorSystem
@@ -19,6 +22,7 @@ import akka.serialization.SerializationExtension
import akka.protobuf.ByteString
import akka.protobuf.MessageLite
import akka.serialization.SerializerWithStringManifest
+import akka.cluster.ddata.VersionVector
/**
* Some useful serialization helper methods.
@@ -101,8 +105,32 @@ trait SerializationSupport {
} else {
// old remote node
uniqueAddress.getUid.toLong
- }
- )
+ })
+
+ def versionVectorToProto(versionVector: VersionVector): dm.VersionVector = {
+ val b = dm.VersionVector.newBuilder()
+ versionVector.versionsIterator.foreach {
+ case (node, value) ⇒ b.addEntries(dm.VersionVector.Entry.newBuilder().
+ setNode(uniqueAddressToProto(node)).setVersion(value))
+ }
+ b.build()
+ }
+
+ def versionVectorFromBinary(bytes: Array[Byte]): VersionVector =
+ versionVectorFromProto(dm.VersionVector.parseFrom(bytes))
+
+ def versionVectorFromProto(versionVector: dm.VersionVector): VersionVector = {
+ val entries = versionVector.getEntriesList
+ if (entries.isEmpty)
+ VersionVector.empty
+ else if (entries.size == 1)
+ VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion)
+ else {
+ val versions: TreeMap[UniqueAddress, Long] = versionVector.getEntriesList.asScala.map(entry ⇒
+ uniqueAddressFromProto(entry.getNode) → entry.getVersion)(breakOut)
+ VersionVector(versions)
+ }
+ }
def resolveActorRef(path: String): ActorRef =
system.provider.resolveActorRef(path)
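
Since the VersionVector helpers now live in the shared SerializationSupport trait, any
serializer mixing in the trait can reuse them. A minimal sketch (MyDataSerializer is an
illustrative name, not part of the patch):

    import akka.actor.ExtendedActorSystem
    import akka.cluster.ddata.VersionVector
    import akka.cluster.ddata.protobuf.SerializationSupport

    class MyDataSerializer(val system: ExtendedActorSystem) extends SerializationSupport {
      // write a VersionVector field via the shared dm.VersionVector protobuf message
      def versionVectorToBytes(vv: VersionVector): Array[Byte] =
        versionVectorToProto(vv).toByteArray

      // read it back; empty, single-entry and TreeMap-backed vectors are all handled
      def versionVectorFrom(bytes: Array[Byte]): VersionVector =
        versionVectorFromBinary(bytes)
    }
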
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
index d823090d10..8175307da9 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
@@ -38,6 +38,8 @@ object PerformanceSpec extends MultiNodeConfig {
#akka.cluster.distributed-data.durable.keys = ["*"]
#akka.cluster.distributed-data.durable.lmdb.dir = target/PerformanceSpec-${System.currentTimeMillis}-ddata
#akka.cluster.distributed-data.durable.lmdb.write-behind-interval = 200ms
+
+ #akka.cluster.distributed-data.delta-crdt.enabled = off
"""))
def countDownProps(latch: TestLatch): Props = Props(new CountDown(latch)).withDeploy(Deploy.local)
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala
index ebb4265d93..44910f3afd 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala
@@ -22,6 +22,7 @@ object ReplicatorDeltaSpec extends MultiNodeConfig {
val fourth = role("fourth")
commonConfig(ConfigFactory.parseString("""
+ akka.loglevel = DEBUG
akka.actor.provider = "cluster"
akka.log-dead-letters-during-shutdown = off
"""))
@@ -32,6 +33,8 @@ object ReplicatorDeltaSpec extends MultiNodeConfig {
final case class Delay(n: Int) extends Op
final case class Incr(key: PNCounterKey, n: Int, consistency: WriteConsistency) extends Op
final case class Decr(key: PNCounterKey, n: Int, consistency: WriteConsistency) extends Op
+ final case class Add(key: ORSetKey[String], elem: String, consistency: WriteConsistency) extends Op
+ final case class Remove(key: ORSetKey[String], elem: String, consistency: WriteConsistency) extends Op
val timeout = 5.seconds
val writeTwo = WriteTo(2, timeout)
@@ -40,8 +43,11 @@ object ReplicatorDeltaSpec extends MultiNodeConfig {
val KeyA = PNCounterKey("A")
val KeyB = PNCounterKey("B")
val KeyC = PNCounterKey("C")
+ val KeyD = ORSetKey[String]("D")
+ val KeyE = ORSetKey[String]("E")
+ val KeyF = ORSetKey[String]("F")
- def generateOperations(): Vector[Op] = {
+ def generateOperations(onNode: RoleName): Vector[Op] = {
val rnd = ThreadLocalRandom.current()
def consistency(): WriteConsistency = {
@@ -52,7 +58,7 @@ object ReplicatorDeltaSpec extends MultiNodeConfig {
}
}
- def key(): PNCounterKey = {
+ def rndPnCounterkey(): PNCounterKey = {
rnd.nextInt(3) match {
case 0 ⇒ KeyA
case 1 ⇒ KeyB
@@ -60,11 +66,43 @@ object ReplicatorDeltaSpec extends MultiNodeConfig {
}
}
- (0 to (20 + rnd.nextInt(10))).map { _ ⇒
+ def rndOrSetkey(): ORSetKey[String] = {
rnd.nextInt(3) match {
+ case 0 ⇒ KeyD
+ case 1 ⇒ KeyE
+ case 2 ⇒ KeyF
+ }
+ }
+
+ var availableForRemove = Set.empty[String]
+
+ def rndAddElement(): String = {
+ // lower case a - j
+ val s = (97 + rnd.nextInt(10)).toChar.toString
+ availableForRemove += s
+ s
+ }
+
+ def rndRemoveElement(): String = {
+ if (availableForRemove.isEmpty)
+ "a"
+ else
+ availableForRemove.toVector(rnd.nextInt(availableForRemove.size))
+ }
+
+ (0 to (30 + rnd.nextInt(10))).map { _ ⇒
+ rnd.nextInt(4) match {
case 0 ⇒ Delay(rnd.nextInt(500))
- case 1 ⇒ Incr(key(), rnd.nextInt(100), consistency())
- case 2 ⇒ Decr(key(), rnd.nextInt(10), consistency())
+ case 1 ⇒ Incr(rndPnCounterkey(), rnd.nextInt(100), consistency())
+ case 2 ⇒ Decr(rndPnCounterkey(), rnd.nextInt(10), consistency())
+ case 3 ⇒
+ // ORSet
+ val key = rndOrSetkey()
+ // only removals for KeyF on node first
+ if (key == KeyF && onNode == first && rnd.nextBoolean())
+ Remove(key, rndRemoveElement(), consistency())
+ else
+ Add(key, rndAddElement(), consistency())
}
}.toVector
}
@@ -136,21 +174,32 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult
enterBarrier("ready")
runOn(first) {
- fullStateReplicator ! Update(KeyA, PNCounter.empty, WriteLocal)(_ + 1)
- deltaReplicator ! Update(KeyA, PNCounter.empty, WriteLocal)(_ + 1)
+ // by setting something for each key we don't have to worry about NotFound
+ List(KeyA, KeyB, KeyC).foreach { key ⇒
+ fullStateReplicator ! Update(key, PNCounter.empty, WriteLocal)(_ + 1)
+ deltaReplicator ! Update(key, PNCounter.empty, WriteLocal)(_ + 1)
+ }
+ List(KeyD, KeyE, KeyF).foreach { key ⇒
+ fullStateReplicator ! Update(key, ORSet.empty[String], WriteLocal)(_ + "a")
+ deltaReplicator ! Update(key, ORSet.empty[String], WriteLocal)(_ + "a")
+ }
}
enterBarrier("updated-1")
within(5.seconds) {
awaitAssert {
val p = TestProbe()
- deltaReplicator.tell(Get(KeyA, ReadLocal), p.ref)
- p.expectMsgType[GetSuccess[PNCounter]].dataValue.getValue.intValue should be(1)
+ List(KeyA, KeyB, KeyC).foreach { key ⇒
+ fullStateReplicator.tell(Get(key, ReadLocal), p.ref)
+ p.expectMsgType[GetSuccess[PNCounter]].dataValue.getValue.intValue should be(1)
+ }
}
awaitAssert {
val p = TestProbe()
- deltaReplicator.tell(Get(KeyA, ReadLocal), p.ref)
- p.expectMsgType[GetSuccess[PNCounter]].dataValue.getValue.intValue should be(1)
+ List(KeyD, KeyE, KeyF).foreach { key ⇒
+ fullStateReplicator.tell(Get(key, ReadLocal), p.ref)
+ p.expectMsgType[GetSuccess[ORSet[String]]].dataValue.elements should ===(Set("a"))
+ }
}
}
@@ -158,7 +207,7 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult
}
"be eventually consistent" in {
- val operations = generateOperations()
+ val operations = generateOperations(onNode = myself)
log.debug(s"random operations on [${myself.name}]: ${operations.mkString(", ")}")
try {
// perform random operations with both delta and full-state replicators
@@ -170,10 +219,22 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult
case Delay(d) ⇒ Thread.sleep(d)
case Incr(key, n, consistency) ⇒
fullStateReplicator ! Update(key, PNCounter.empty, consistency)(_ + n)
- deltaReplicator ! Update(key, PNCounter.empty, WriteLocal)(_ + n)
+ deltaReplicator ! Update(key, PNCounter.empty, consistency)(_ + n)
case Decr(key, n, consistency) ⇒
fullStateReplicator ! Update(key, PNCounter.empty, consistency)(_ - n)
- deltaReplicator ! Update(key, PNCounter.empty, WriteLocal)(_ - n)
+ deltaReplicator ! Update(key, PNCounter.empty, consistency)(_ - n)
+ case Add(key, elem, consistency) ⇒
+            // to have a deterministic result when mixing add/remove we can only perform
+ // the ORSet operations from one node
+ runOn((if (key == KeyF) List(first) else List(first, second, third)): _*) {
+ fullStateReplicator ! Update(key, ORSet.empty[String], consistency)(_ + elem)
+ deltaReplicator ! Update(key, ORSet.empty[String], consistency)(_ + elem)
+ }
+ case Remove(key, elem, consistency) ⇒
+ runOn(first) {
+ fullStateReplicator ! Update(key, ORSet.empty[String], consistency)(_ - elem)
+ deltaReplicator ! Update(key, ORSet.empty[String], consistency)(_ - elem)
+ }
}
}
@@ -192,6 +253,19 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult
}
}
+ List(KeyD, KeyE, KeyF).foreach { key ⇒
+ within(5.seconds) {
+ awaitAssert {
+ val p = TestProbe()
+ fullStateReplicator.tell(Get(key, ReadLocal), p.ref)
+ val fullStateValue = p.expectMsgType[GetSuccess[ORSet[String]]].dataValue.elements
+ deltaReplicator.tell(Get(key, ReadLocal), p.ref)
+ val deltaValue = p.expectMsgType[GetSuccess[ORSet[String]]].dataValue.elements
+ deltaValue should ===(fullStateValue)
+ }
+ }
+ }
+
enterBarrierAfterTestStep()
} catch {
case e: Throwable ⇒
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala
new file mode 100644
index 0000000000..04232f3f82
--- /dev/null
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala
@@ -0,0 +1,163 @@
+/**
+ * Copyright (C) 2009-2017 Lightbend Inc.
+ */
+package akka.cluster.ddata
+
+import scala.concurrent.duration._
+
+import akka.cluster.Cluster
+import akka.remote.testconductor.RoleName
+import akka.remote.testkit.MultiNodeConfig
+import akka.remote.testkit.MultiNodeSpec
+import akka.remote.transport.ThrottlerTransportAdapter.Direction
+import akka.testkit._
+import com.typesafe.config.ConfigFactory
+
+object ReplicatorORSetDeltaSpec extends MultiNodeConfig {
+ val first = role("first")
+ val second = role("second")
+ val third = role("third")
+
+ commonConfig(ConfigFactory.parseString("""
+ akka.loglevel = DEBUG
+ akka.actor.provider = "cluster"
+ akka.log-dead-letters-during-shutdown = off
+ """))
+
+ testTransport(on = true)
+}
+
+class ReplicatorORSetDeltaSpecMultiJvmNode1 extends ReplicatorORSetDeltaSpec
+class ReplicatorORSetDeltaSpecMultiJvmNode2 extends ReplicatorORSetDeltaSpec
+class ReplicatorORSetDeltaSpecMultiJvmNode3 extends ReplicatorORSetDeltaSpec
+
+class ReplicatorORSetDeltaSpec extends MultiNodeSpec(ReplicatorORSetDeltaSpec) with STMultiNodeSpec with ImplicitSender {
+ import Replicator._
+ import ReplicatorORSetDeltaSpec._
+
+ override def initialParticipants = roles.size
+
+ implicit val cluster = Cluster(system)
+ val replicator = system.actorOf(Replicator.props(
+ ReplicatorSettings(system).withGossipInterval(1.second)), "replicator")
+ val timeout = 3.seconds.dilated
+
+ val KeyA = ORSetKey[String]("A")
+ val KeyB = ORSetKey[String]("B")
+ val KeyC = ORSetKey[String]("C")
+
+ def join(from: RoleName, to: RoleName): Unit = {
+ runOn(from) {
+ cluster join node(to).address
+ }
+ enterBarrier(from.name + "-joined")
+ }
+
+ def assertValue(key: Key[ReplicatedData], expected: Any): Unit =
+ within(10.seconds) {
+ awaitAssert {
+ replicator ! Get(key, ReadLocal)
+ val value = expectMsgPF() {
+ case g @ GetSuccess(`key`, _) ⇒ g.dataValue match {
+ case c: ORSet[_] ⇒ c.elements
+ }
+ }
+ value should be(expected)
+ }
+ }
+
+ "ORSet delta" must {
+
+ "replicate data in initial phase" in {
+ join(first, first)
+ join(second, first)
+ join(third, first)
+
+ replicator ! Replicator.Internal.TestFullStateGossip(enabled = false)
+
+ within(10.seconds) {
+ awaitAssert {
+ replicator ! GetReplicaCount
+ expectMsg(ReplicaCount(3))
+ }
+ }
+
+ runOn(first) {
+ replicator ! Update(KeyA, ORSet.empty[String], WriteLocal)(_ + "a")
+ expectMsg(UpdateSuccess(KeyA, None))
+ }
+
+ enterBarrier("initial-updates-done")
+
+ assertValue(KeyA, Set("a"))
+
+ enterBarrier("after-1")
+ }
+
+ "be propagated with causal consistency during network split" in {
+ runOn(first) {
+ // third is isolated
+ testConductor.blackhole(first, third, Direction.Both).await
+ testConductor.blackhole(second, third, Direction.Both).await
+ }
+ enterBarrier("split")
+
+ runOn(first) {
+ replicator ! Update(KeyA, ORSet.empty[String], WriteLocal)(_ + "b")
+ expectMsg(UpdateSuccess(KeyA, None))
+ }
+ runOn(second) {
+ replicator ! Update(KeyA, ORSet.empty[String], WriteLocal)(_ + "d")
+ expectMsg(UpdateSuccess(KeyA, None))
+ }
+ runOn(first, second) {
+ assertValue(KeyA, Set("a", "b", "d"))
+ Thread.sleep(2000) // all deltas sent
+ }
+ enterBarrier("added-b-and-d")
+
+ runOn(first) {
+ testConductor.passThrough(first, third, Direction.Both).await
+ testConductor.passThrough(second, third, Direction.Both).await
+ }
+ enterBarrier("healed")
+
+ runOn(first) {
+ // delta for "c" will be sent to third, but it has not received the previous delta for "b"
+ replicator ! Update(KeyA, ORSet.empty[String], WriteLocal)(_ + "c")
+ expectMsg(UpdateSuccess(KeyA, None))
+ // let the delta be propagated (will not fail if it takes longer)
+ Thread.sleep(1000)
+ }
+ enterBarrier("added-c")
+
+ runOn(first, second) {
+ assertValue(KeyA, Set("a", "b", "c", "d"))
+ }
+ runOn(third) {
+      // the delta for "c" should not be applied because third has not received the previous delta for "b",
+      // and full state gossip is still turned off
+ assertValue(KeyA, Set("a"))
+ }
+ enterBarrier("verified-before-full-gossip")
+
+ replicator ! Replicator.Internal.TestFullStateGossip(enabled = true)
+ assertValue(KeyA, Set("a", "b", "c", "d"))
+ enterBarrier("verified-after-full-gossip")
+
+ replicator ! Replicator.Internal.TestFullStateGossip(enabled = false)
+
+ // and now the delta seqNr should be in sync again
+ runOn(first) {
+ replicator ! Update(KeyA, ORSet.empty[String], WriteLocal)(_ + "e")
+ expectMsg(UpdateSuccess(KeyA, None))
+ }
+ assertValue(KeyA, Set("a", "b", "c", "d", "e"))
+
+ enterBarrier("after-2")
+ }
+
+ }
+
+}
+
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
index e4bbca4c7d..3b29819b91 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
@@ -22,6 +22,7 @@ object ReplicatorSpec extends MultiNodeConfig {
akka.loglevel = INFO
akka.actor.provider = "cluster"
akka.log-dead-letters-during-shutdown = off
+ #akka.cluster.distributed-data.delta-crdt.enabled = off
"""))
testTransport(on = true)
diff --git a/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData.java b/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData.java
index 8db3842051..ffc5369aa6 100644
--- a/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData.java
+++ b/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData.java
@@ -3,10 +3,12 @@
*/
package akka.cluster.ddata;
-import akka.cluster.UniqueAddress;
-public class JavaImplOfDeltaReplicatedData extends AbstractDeltaReplicatedData implements
- RemovedNodePruning {
+import java.util.Optional;
+
+// same delta type
+public class JavaImplOfDeltaReplicatedData extends AbstractDeltaReplicatedData<JavaImplOfDeltaReplicatedData, JavaImplOfDeltaReplicatedData>
+ implements ReplicatedDelta {
@Override
public JavaImplOfDeltaReplicatedData mergeData(JavaImplOfDeltaReplicatedData other) {
@@ -14,32 +16,23 @@ public class JavaImplOfDeltaReplicatedData extends AbstractDeltaReplicatedData

+  @Override
+  public Optional<JavaImplOfDeltaReplicatedData> deltaData() {
+ return Optional.empty();
+ }
+
@Override
public JavaImplOfDeltaReplicatedData resetDelta() {
return this;
}
@Override
-  public scala.collection.immutable.Set<UniqueAddress> modifiedByNodes() {
-    return akka.japi.Util.immutableSeq(new java.util.ArrayList<UniqueAddress>()).toSet();
+ public JavaImplOfDeltaReplicatedData zero() {
+ return new JavaImplOfDeltaReplicatedData();
}
- @Override
- public boolean needPruningFrom(UniqueAddress removedNode) {
- return false;
- }
-
- @Override
- public JavaImplOfDeltaReplicatedData prune(UniqueAddress removedNode, UniqueAddress collapseInto) {
- return this;
- }
-
- @Override
- public JavaImplOfDeltaReplicatedData pruningCleanup(UniqueAddress removedNode) {
- return this;
- }
}
diff --git a/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData2.java b/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData2.java
new file mode 100644
index 0000000000..8ecb53564e
--- /dev/null
+++ b/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfDeltaReplicatedData2.java
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2017 Lightbend Inc.
+ */
+package akka.cluster.ddata;
+
+
+import java.util.Optional;
+
+import akka.cluster.UniqueAddress;
+
+// different delta type
+public class JavaImplOfDeltaReplicatedData2
+    extends AbstractDeltaReplicatedData<JavaImplOfDeltaReplicatedData2, JavaImplOfDeltaReplicatedData2.Delta> {
+
+  public static class Delta extends AbstractReplicatedData<Delta> implements ReplicatedDelta, RequiresCausalDeliveryOfDeltas {
+ @Override
+ public Delta mergeData(Delta other) {
+ return this;
+ }
+
+ @Override
+ public JavaImplOfDeltaReplicatedData2 zero() {
+ return new JavaImplOfDeltaReplicatedData2();
+ }
+ }
+
+ @Override
+ public JavaImplOfDeltaReplicatedData2 mergeData(JavaImplOfDeltaReplicatedData2 other) {
+ return this;
+ }
+
+ @Override
+ public JavaImplOfDeltaReplicatedData2 mergeDeltaData(Delta other) {
+ return this;
+ }
+
+ @Override
+  public Optional<Delta> deltaData() {
+ return Optional.empty();
+ }
+
+ @Override
+ public JavaImplOfDeltaReplicatedData2 resetDelta() {
+ return this;
+ }
+
+
+
+}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
index f58db38454..e511e024fa 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
@@ -4,17 +4,24 @@
package akka.cluster.ddata
import akka.actor.Address
+import akka.cluster.UniqueAddress
+import akka.cluster.ddata.Key.KeyId
import akka.cluster.ddata.Replicator.Internal.DataEnvelope
+import akka.cluster.ddata.Replicator.Internal.Delta
import akka.cluster.ddata.Replicator.Internal.DeltaPropagation
import org.scalactic.TypeCheckedTripleEquals
import org.scalatest.Matchers
import org.scalatest.WordSpec
object DeltaPropagationSelectorSpec {
- class TestSelector(override val allNodes: Vector[Address]) extends DeltaPropagationSelector {
- override val divisor = 5
- override def createDeltaPropagation(deltas: Map[String, ReplicatedData]): DeltaPropagation =
- DeltaPropagation(deltas.mapValues(d ⇒ DataEnvelope(d)))
+ class TestSelector(
+ val selfUniqueAddress: UniqueAddress,
+ override val allNodes: Vector[Address]) extends DeltaPropagationSelector {
+ override val gossipIntervalDivisor = 5
+ override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation =
+ DeltaPropagation(selfUniqueAddress, deltas.mapValues {
+ case (d, fromSeqNr, toSeqNr) ⇒ Delta(DataEnvelope(d), fromSeqNr, toSeqNr)
+ })
}
val deltaA = GSet.empty[String] + "a"
@@ -24,11 +31,12 @@ object DeltaPropagationSelectorSpec {
class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheckedTripleEquals {
import DeltaPropagationSelectorSpec._
+ val selfUniqueAddress = UniqueAddress(Address("akka", "Sys", "localhost", 4999), 1L)
val nodes = (2500 until 2600).map(n ⇒ Address("akka", "Sys", "localhost", n)).toVector
"DeltaPropagationSelector" must {
"collect none when no nodes" in {
- val selector = new TestSelector(Vector.empty)
+ val selector = new TestSelector(selfUniqueAddress, Vector.empty)
selector.update("A", deltaA)
selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation])
selector.cleanupDeltaEntries()
@@ -36,13 +44,15 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
}
"collect 1 when one node" in {
- val selector = new TestSelector(nodes.take(1))
+ val selector = new TestSelector(selfUniqueAddress, nodes.take(1))
selector.update("A", deltaA)
selector.update("B", deltaB)
selector.cleanupDeltaEntries()
selector.hasDeltaEntries("A") should ===(true)
selector.hasDeltaEntries("B") should ===(true)
- val expected = DeltaPropagation(Map("A" → DataEnvelope(deltaA), "B" → DataEnvelope(deltaB)))
+ val expected = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(deltaA), 1L, 1L),
+ "B" → Delta(DataEnvelope(deltaB), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected))
selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation])
selector.cleanupDeltaEntries()
@@ -51,10 +61,12 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
}
"collect 2+1 when three nodes" in {
- val selector = new TestSelector(nodes.take(3))
+ val selector = new TestSelector(selfUniqueAddress, nodes.take(3))
selector.update("A", deltaA)
selector.update("B", deltaB)
- val expected = DeltaPropagation(Map("A" → DataEnvelope(deltaA), "B" → DataEnvelope(deltaB)))
+ val expected = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(deltaA), 1L, 1L),
+ "B" → Delta(DataEnvelope(deltaB), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected, nodes(1) → expected))
selector.cleanupDeltaEntries()
selector.hasDeltaEntries("A") should ===(true)
@@ -67,16 +79,21 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
}
"keep track of deltas per node" in {
- val selector = new TestSelector(nodes.take(3))
+ val selector = new TestSelector(selfUniqueAddress, nodes.take(3))
selector.update("A", deltaA)
selector.update("B", deltaB)
- val expected1 = DeltaPropagation(Map("A" → DataEnvelope(deltaA), "B" → DataEnvelope(deltaB)))
+ val expected1 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(deltaA), 1L, 1L),
+ "B" → Delta(DataEnvelope(deltaB), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected1, nodes(1) → expected1))
// new update before previous was propagated to all nodes
selector.update("C", deltaC)
- val expected2 = DeltaPropagation(Map("A" → DataEnvelope(deltaA), "B" → DataEnvelope(deltaB),
- "C" → DataEnvelope(deltaC)))
- val expected3 = DeltaPropagation(Map("C" → DataEnvelope(deltaC)))
+ val expected2 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(deltaA), 1L, 1L),
+ "B" → Delta(DataEnvelope(deltaB), 1L, 1L),
+ "C" → Delta(DataEnvelope(deltaC), 1L, 1L)))
+ val expected3 = DeltaPropagation(selfUniqueAddress, Map(
+ "C" → Delta(DataEnvelope(deltaC), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(2) → expected2, nodes(0) → expected3))
selector.cleanupDeltaEntries()
selector.hasDeltaEntries("A") should ===(false)
@@ -88,17 +105,22 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
selector.hasDeltaEntries("C") should ===(false)
}
- "merge updates that occur within same tick" in {
+ "bump version for each update" in {
val delta1 = GSet.empty[String] + "a1"
val delta2 = GSet.empty[String] + "a2"
val delta3 = GSet.empty[String] + "a3"
- val selector = new TestSelector(nodes.take(1))
+ val selector = new TestSelector(selfUniqueAddress, nodes.take(1))
selector.update("A", delta1)
+ selector.currentVersion("A") should ===(1L)
selector.update("A", delta2)
- val expected1 = DeltaPropagation(Map("A" → DataEnvelope(delta1.merge(delta2))))
+ selector.currentVersion("A") should ===(2L)
+ val expected1 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta1.merge(delta2)), 1L, 2L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected1))
selector.update("A", delta3)
- val expected2 = DeltaPropagation(Map("A" → DataEnvelope(delta3)))
+ selector.currentVersion("A") should ===(3L)
+ val expected2 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta3), 3L, 3L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected2))
selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation])
}
@@ -107,32 +129,37 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
val delta1 = GSet.empty[String] + "a1"
val delta2 = GSet.empty[String] + "a2"
val delta3 = GSet.empty[String] + "a3"
- val selector = new TestSelector(nodes.take(3)) {
+ val selector = new TestSelector(selfUniqueAddress, nodes.take(3)) {
override def nodesSliceSize(allNodesSize: Int): Int = 1
}
selector.update("A", delta1)
- val expected1 = DeltaPropagation(Map("A" → DataEnvelope(delta1)))
+ val expected1 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta1), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected1))
selector.update("A", delta2)
- val expected2 = DeltaPropagation(Map("A" → DataEnvelope(delta1.merge(delta2))))
+ val expected2 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta1.merge(delta2)), 1L, 2L)))
selector.collectPropagations() should ===(Map(nodes(1) → expected2))
selector.update("A", delta3)
- val expected3 = DeltaPropagation(Map("A" → DataEnvelope(delta1.merge(delta2).merge(delta3))))
+ val expected3 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta1.merge(delta2).merge(delta3)), 1L, 3L)))
selector.collectPropagations() should ===(Map(nodes(2) → expected3))
- val expected4 = DeltaPropagation(Map("A" → DataEnvelope(delta2.merge(delta3))))
+ val expected4 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta2.merge(delta3)), 2L, 3L)))
selector.collectPropagations() should ===(Map(nodes(0) → expected4))
- val expected5 = DeltaPropagation(Map("A" → DataEnvelope(delta3)))
+ val expected5 = DeltaPropagation(selfUniqueAddress, Map(
+ "A" → Delta(DataEnvelope(delta3), 3L, 3L)))
selector.collectPropagations() should ===(Map(nodes(1) → expected5))
selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation])
}
"calcualte right slice size" in {
- val selector = new TestSelector(nodes)
+ val selector = new TestSelector(selfUniqueAddress, nodes)
selector.nodesSliceSize(0) should ===(0)
selector.nodesSliceSize(1) should ===(1)
(2 to 9).foreach { n ⇒
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala
index 9f7402ca65..5efd0fc875 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala
@@ -30,9 +30,12 @@ class GCounterSpec extends WordSpec with Matchers {
c6.state(node1) should be(2)
c6.state(node2) should be(3)
- c2.delta.state(node1) should be(1)
- c3.delta.state(node1) should be(2)
- c6.delta.state(node2) should be(3)
+ c2.delta.get.state(node1) should be(1)
+ c1.mergeDelta(c2.delta.get) should be(c2)
+ c3.delta.get.state(node1) should be(2)
+ c2.mergeDelta(c3.delta.get) should be(c3)
+ c6.delta.get.state(node2) should be(3)
+ c5.mergeDelta(c6.delta.get) should be(c6)
}
"be able to increment each node's record by arbitrary delta" in {
@@ -95,13 +98,13 @@ class GCounterSpec extends WordSpec with Matchers {
merged1.state(node1) should be(7)
merged1.state(node2) should be(10)
merged1.value should be(17)
- merged1.delta should be(GCounter.empty)
+ merged1.delta should ===(None)
val merged2 = c26 merge c16
merged2.state(node1) should be(7)
merged2.state(node2) should be(10)
merged2.value should be(17)
- merged2.delta should be(GCounter.empty)
+ merged2.delta should ===(None)
}
"be able to have its history correctly merged with another GCounter 2" in {
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala
index 6387bf463f..3a5052c764 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala
@@ -64,14 +64,14 @@ class GSetSpec extends WordSpec with Matchers {
val c12 = c11 + user1
val c13 = c12 + user2
- c12.delta.elements should ===(Set(user1))
- c13.delta.elements should ===(Set(user1, user2))
+ c12.delta.get.elements should ===(Set(user1))
+ c13.delta.get.elements should ===(Set(user1, user2))
// deltas build state
- (c12 merge c13.delta) should ===(c13)
+ (c12 mergeDelta c13.delta.get) should ===(c13)
// own deltas are idempotent
- (c13 merge c13.delta) should ===(c13)
+ (c13 mergeDelta c13.delta.get) should ===(c13)
// set 2
val c21 = GSet.empty[String]
@@ -79,18 +79,18 @@ class GSetSpec extends WordSpec with Matchers {
val c22 = c21 + user3
val c23 = c22.resetDelta + user4
- c22.delta.elements should ===(Set(user3))
- c23.delta.elements should ===(Set(user4))
+ c22.delta.get.elements should ===(Set(user3))
+ c23.delta.get.elements should ===(Set(user4))
c23.elements should ===(Set(user3, user4))
val c33 = c13 merge c23
// merge both ways
- val merged1 = GSet.empty[String] merge c12.delta merge c13.delta merge c22.delta merge c23.delta
+ val merged1 = GSet.empty[String] mergeDelta c12.delta.get mergeDelta c13.delta.get mergeDelta c22.delta.get mergeDelta c23.delta.get
merged1.elements should ===(Set(user1, user2, user3, user4))
- val merged2 = GSet.empty[String] merge c23.delta merge c13.delta merge c22.delta
+ val merged2 = GSet.empty[String] mergeDelta c23.delta.get mergeDelta c13.delta.get mergeDelta c22.delta.get
merged2.elements should ===(Set(user1, user2, user3, user4))
merged1 should ===(c33)
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
index ef27e265e7..73febe05b1 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
@@ -14,17 +14,17 @@ import org.scalatest.WordSpec
class ORSetSpec extends WordSpec with Matchers {
- val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1)
- val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2)
+ val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1L)
+ val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2L)
- val nodeA = UniqueAddress(Address("akka.tcp", "Sys", "a", 2552), 1)
- val nodeB = UniqueAddress(nodeA.address.copy(host = Some("b")), 2)
- val nodeC = UniqueAddress(nodeA.address.copy(host = Some("c")), 3)
- val nodeD = UniqueAddress(nodeA.address.copy(host = Some("d")), 4)
- val nodeE = UniqueAddress(nodeA.address.copy(host = Some("e")), 5)
- val nodeF = UniqueAddress(nodeA.address.copy(host = Some("f")), 6)
- val nodeG = UniqueAddress(nodeA.address.copy(host = Some("g")), 7)
- val nodeH = UniqueAddress(nodeA.address.copy(host = Some("h")), 8)
+ val nodeA = UniqueAddress(Address("akka.tcp", "Sys", "a", 2552), 1L)
+ val nodeB = UniqueAddress(nodeA.address.copy(host = Some("b")), 2L)
+ val nodeC = UniqueAddress(nodeA.address.copy(host = Some("c")), 3L)
+ val nodeD = UniqueAddress(nodeA.address.copy(host = Some("d")), 4L)
+ val nodeE = UniqueAddress(nodeA.address.copy(host = Some("e")), 5L)
+ val nodeF = UniqueAddress(nodeA.address.copy(host = Some("f")), 6L)
+ val nodeG = UniqueAddress(nodeA.address.copy(host = Some("g")), 7L)
+ val nodeH = UniqueAddress(nodeA.address.copy(host = Some("h")), 8L)
val user1 = """{"username":"john","password":"coltrane"}"""
val user2 = """{"username":"sonny","password":"rollins"}"""
@@ -226,6 +226,185 @@ class ORSetSpec extends WordSpec with Matchers {
}
+ "ORSet deltas" must {
+
+ def addDeltaOp(s: ORSet[String]): ORSet.AddDeltaOp[String] =
+ asAddDeltaOp(s.delta.get)
+
+ def asAddDeltaOp(delta: Any): ORSet.AddDeltaOp[String] =
+ delta match {
+ case d: ORSet.AddDeltaOp[String] @unchecked ⇒ d
+ case _ ⇒ throw new IllegalArgumentException("Expected AddDeltaOp")
+ }
+
+ "work for additions" in {
+ val s1 = ORSet.empty[String]
+ val s2 = s1.add(node1, "a")
+ addDeltaOp(s2).underlying.elements should ===(Set("a"))
+ s1.mergeDelta(s2.delta.get) should ===(s2)
+
+ val s3 = s2.resetDelta.add(node1, "b").add(node1, "c")
+ addDeltaOp(s3).underlying.elements should ===(Set("b", "c"))
+ s2.mergeDelta(s3.delta.get) should ===(s3)
+
+ // another node adds "d"
+ val s4 = s3.resetDelta.add(node2, "d")
+ addDeltaOp(s4).underlying.elements should ===(Set("d"))
+ s3.mergeDelta(s4.delta.get) should ===(s4)
+
+ // concurrent update
+ val s5 = s3.resetDelta.add(node1, "e")
+ val s6 = s5.merge(s4)
+ s5.mergeDelta(s4.delta.get) should ===(s6)
+
+ // concurrent add of same element
+ val s7 = s3.resetDelta.add(node1, "d")
+ val s8 = s7.merge(s4)
+ // the dot contains both nodes
+      s8.elementsMap("d").contains(node1) should ===(true)
+      s8.elementsMap("d").contains(node2) should ===(true)
+ // and same result when merging the deltas
+ s7.mergeDelta(s4.delta.get) should ===(s8)
+ s4.mergeDelta(s7.delta.get) should ===(s8)
+ }
+
+ "handle another concurrent add scenario" in {
+ val s1 = ORSet.empty[String]
+ val s2 = s1.add(node1, "a")
+ val s3 = s2.add(node1, "b")
+ val s4 = s2.add(node2, "c")
+
+ // full state merge for reference
+ val s5 = s4.merge(s3)
+ s5.elements should ===(Set("a", "b", "c"))
+
+ val s6 = s4.mergeDelta(s3.delta.get)
+ s6.elements should ===(Set("a", "b", "c"))
+ }
+
+ "merge deltas into delta groups" in {
+ val s1 = ORSet.empty[String]
+ val s2 = s1.add(node1, "a")
+ val d2 = s2.delta.get
+ val s3 = s2.resetDelta.add(node1, "b")
+ val d3 = s3.delta.get
+ val d4 = d2 merge d3
+ asAddDeltaOp(d4).underlying.elements should ===(Set("a", "b"))
+ s1.mergeDelta(d4) should ===(s3)
+ s2.mergeDelta(d4) should ===(s3)
+
+ val s5 = s3.resetDelta.remove(node1, "b")
+ val d5 = s5.delta.get
+ val d6 = (d4 merge d5).asInstanceOf[ORSet.DeltaGroup[String]]
+ d6.ops.last.getClass should ===(classOf[ORSet.RemoveDeltaOp[String]])
+ d6.ops.size should ===(2)
+ s3.mergeDelta(d6) should ===(s5)
+
+ val s7 = s5.resetDelta.add(node1, "c")
+ val s8 = s7.resetDelta.add(node1, "d")
+ val d9 = (d6 merge s7.delta.get merge s8.delta.get).asInstanceOf[ORSet.DeltaGroup[String]]
+ // the add "c" and add "d" are merged into one AddDeltaOp
+ asAddDeltaOp(d9.ops.last).underlying.elements should ===(Set("c", "d"))
+ d9.ops.size should ===(3)
+ s5.mergeDelta(d9) should ===(s8)
+ s5.mergeDelta(s7.delta.get).mergeDelta(s8.delta.get) should ===(s8)
+ }
+
+ "work for removals" in {
+ val s1 = ORSet.empty[String]
+ val s2 = s1.add(node1, "a").add(node1, "b").resetDelta
+ val s3 = s2.remove(node1, "b")
+ s2.merge(s3) should ===(s3)
+ s2.mergeDelta(s3.delta.get) should ===(s3)
+ s2.mergeDelta(s3.delta.get).elements should ===(Set("a"))
+
+ // concurrent update
+ val s4 = s2.add(node2, "c").resetDelta
+ val s5 = s4.merge(s3)
+ s5.elements should ===(Set("a", "c"))
+ s4.mergeDelta(s3.delta.get) should ===(s5)
+
+ // add "b" again
+ val s6 = s5.add(node2, "b")
+ // merging the old delta should not remove it
+ s6.mergeDelta(s3.delta.get) should ===(s6)
+ s6.mergeDelta(s3.delta.get).elements should ===(Set("a", "b", "c"))
+ }
+
+ "work for clear" in {
+ val s1 = ORSet.empty[String]
+ val s2 = s1.add(node1, "a").add(node1, "b")
+ val s3 = s2.resetDelta.clear(node1)
+ val s4 = s3.resetDelta.add(node1, "c")
+ s2.merge(s3) should ===(s3)
+ s2.mergeDelta(s3.delta.get) should ===(s3)
+ val s5 = s2.mergeDelta(s3.delta.get).mergeDelta(s4.delta.get)
+ s5.elements should ===(Set("c"))
+ s5 should ===(s4)
+
+ // concurrent update
+ val s6 = s2.resetDelta.add(node2, "d")
+ val s7 = s6.merge(s3)
+ s7.elements should ===(Set("d"))
+ s6.mergeDelta(s3.delta.get) should ===(s7)
+
+ // add "b" again
+ val s8 = s7.add(node2, "b")
+ // merging the old delta should not remove it
+ s8.mergeDelta(s3.delta.get) should ===(s8)
+ s8.mergeDelta(s3.delta.get).elements should ===(Set("b", "d"))
+ }
+
+ "handle a mixed add/remove scenario" in {
+ val s1 = ORSet.empty[String]
+ val s2 = s1.resetDelta.remove(node1, "e")
+ val s3 = s2.resetDelta.add(node1, "b")
+ val s4 = s3.resetDelta.add(node1, "a")
+ val s5 = s4.resetDelta.remove(node1, "b")
+
+ val deltaGroup1 = s3.delta.get merge s4.delta.get merge s5.delta.get
+
+ val s7 = s2 mergeDelta deltaGroup1
+ s7.elements should ===(Set("a"))
+      // The above scenario was constructed from a failing ReplicatorDeltaSpec run,
+      // some more checks follow...
+
+ val s8 = s2.resetDelta.add(node2, "z") // concurrent update from node2
+ val s9 = s8 mergeDelta deltaGroup1
+ s9.elements should ===(Set("a", "z"))
+ }
+
+ "require causal delivery of deltas" in {
+ // This test illustrates why we need causal delivery of deltas.
+ // Otherwise the following could happen.
+
+ // s0 is the stable state that is initially replicated to all nodes
+ val s0 = ORSet.empty[String].add(node1, "a")
+
+ // add element "b" and "c" at node1
+ val s11 = s0.resetDelta.add(node1, "b")
+ val s12 = s11.resetDelta.add(node1, "c")
+
+ // at the same time, add element "d" at node2
+ val s21 = s0.resetDelta.add(node2, "d")
+
+ // node3 receives delta for "d" and "c", but the delta for "b" is lost
+ val s31 = s0 mergeDelta s21.delta.get mergeDelta s12.delta.get
+ s31.elements should ===(Set("a", "c", "d"))
+
+ // node4 receives all deltas
+ val s41 = s0 mergeDelta s11.delta.get mergeDelta s12.delta.get mergeDelta s21.delta.get
+ s41.elements should ===(Set("a", "b", "c", "d"))
+
+ // node3 and node4 sync with full state gossip
+ val s32 = s31 merge s41
+ // one would expect elements "a", "b", "c", "d", but "b" is removed
+ // because we applied s12.delta without applying s11.delta
+ s32.elements should ===(Set("a", "c", "d"))
+ }
+
+ }
+
"ORSet unit test" must {
"verify subtractDots" in {
val dot = VersionVector(TreeMap(nodeA → 3L, nodeB → 2L, nodeD → 14L, nodeG → 22L))
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala
index 40648ea29f..d04c99efb8 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala
@@ -29,13 +29,13 @@ class PNCounterSpec extends WordSpec with Matchers {
c6.increments.state(node1) should be(2)
c6.increments.state(node2) should be(3)
- c2.delta.value.toLong should be(1)
- c2.delta.increments.state(node1) should be(1)
- c3.delta.value should be(2)
- c3.delta.increments.state(node1) should be(2)
+ c2.delta.get.value.toLong should be(1)
+ c2.delta.get.increments.state(node1) should be(1)
+ c3.delta.get.value should be(2)
+ c3.delta.get.increments.state(node1) should be(2)
- c6.delta.value should be(3)
- c6.delta.increments.state(node2) should be(3)
+ c6.delta.get.value should be(3)
+ c6.delta.get.increments.state(node2) should be(3)
}
"be able to decrement each node's record by one" in {
@@ -51,11 +51,11 @@ class PNCounterSpec extends WordSpec with Matchers {
c6.decrements.state(node1) should be(2)
c6.decrements.state(node2) should be(3)
- c3.delta.value should be(-2)
- c3.delta.decrements.state(node1) should be(2)
+ c3.delta.get.value should be(-2)
+ c3.delta.get.decrements.state(node1) should be(2)
- c6.delta.value should be(-3)
- c6.delta.decrements.state(node2) should be(3)
+ c6.delta.get.value should be(-3)
+ c6.delta.get.decrements.state(node2) should be(3)
}
"be able to increment each node's record by arbitrary delta" in {
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
index 8af0e963c2..4e53e1045e 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
@@ -101,9 +101,17 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem(
checkSameContent(s3.merge(s4), s4.merge(s3))
}
+ "serialize ORSet delta" in {
+ checkSerialization(ORSet().add(address1, "a").delta.get)
+ checkSerialization(ORSet().add(address1, "a").resetDelta.remove(address2, "a").delta.get)
+ checkSerialization(ORSet().add(address1, "a").remove(address2, "a").delta.get)
+ checkSerialization(ORSet().add(address1, "a").resetDelta.clear(address2).delta.get)
+ checkSerialization(ORSet().add(address1, "a").clear(address2).delta.get)
+ }
+
"serialize large GSet" in {
val largeSet = (10000 until 20000).foldLeft(GSet.empty[String]) {
- case (acc, n) ⇒ acc.add(n.toString)
+ case (acc, n) ⇒ acc.resetDelta.add(n.toString)
}
val numberOfBytes = checkSerialization(largeSet)
info(s"size of GSet with ${largeSet.size} elements: $numberOfBytes bytes")
@@ -118,7 +126,7 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem(
case 1 ⇒ address2
case 2 ⇒ address3
}
- acc.add(address, n.toString)
+ acc.resetDelta.add(address, n.toString)
}
val numberOfBytes = checkSerialization(largeSet)
// note that ORSet is compressed, and therefore smaller than GSet
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
index ea32de0d1b..d920da41af 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
@@ -87,9 +87,9 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem(
checkSerialization(Gossip(Map(
"A" → DataEnvelope(data1),
"B" → DataEnvelope(GSet() + "b" + "c")), sendBack = true))
- checkSerialization(DeltaPropagation(Map(
- "A" → DataEnvelope(delta1),
- "B" → DataEnvelope(delta2))))
+ checkSerialization(DeltaPropagation(address1, Map(
+ "A" → Delta(DataEnvelope(delta1), 1L, 1L),
+ "B" → Delta(DataEnvelope(delta2), 3L, 5L))))
checkSerialization(new DurableDataEnvelope(data1))
checkSerialization(new DurableDataEnvelope(DataEnvelope(data1, pruning = Map(
address1 → PruningPerformed(System.currentTimeMillis()),
diff --git a/akka-docs/rst/java/distributed-data.rst b/akka-docs/rst/java/distributed-data.rst
index 724f3b7dec..60f95bb9f2 100644
--- a/akka-docs/rst/java/distributed-data.rst
+++ b/akka-docs/rst/java/distributed-data.rst
@@ -272,19 +272,22 @@ for updates. For example adding element ``'c'`` and ``'d'`` to set ``{'a', 'b'}`
result in sending the delta ``{'c', 'd'}`` and merge that with the state on the
receiving side, resulting in set ``{'a', 'b', 'c', 'd'}``.
-Current protocol for replicating the deltas does not support causal consistency.
-It is only eventually consistent. This means that if elements ``'c'`` and ``'d'`` are
+The protocol for replicating the deltas supports causal consistency if the data type
+is marked with ``RequiresCausalDeliveryOfDeltas``. Otherwise it is only eventually
+consistent. Without causal consistency, this means that if elements ``'c'`` and ``'d'`` are
added in two separate `Update` operations these deltas may occasionally be propagated
to nodes in different order than the causal order of the updates. For this example it
can result in the set ``{'a', 'b', 'd'}`` being seen before element 'c' is seen. Eventually
-it will be ``{'a', 'b', 'c', 'd'}``. If causal consistency is needed the delta propagation
-should be disabled with configuration property
-``akka.cluster.distributed-data.delta-crdt.enabled=off``.
+it will be ``{'a', 'b', 'c', 'd'}``.
Note that the full state is occasionally also replicated for delta-CRDTs, for example when
new nodes are added to the cluster or when deltas could not be propagated because
of network partitions or similar problems.
+The delta propagation can be disabled with the configuration property::
+
+ akka.cluster.distributed-data.delta-crdt.enabled=off
+
Data Types
==========
@@ -316,7 +319,8 @@ The value of the counter is the value of the P counter minus the value of the N
.. includecode:: code/docs/ddata/DistributedDataDocTest.java#pncounter
-``GCounter`` and ``PNCounter`` have support for :ref:`delta_crdt_java`.
+``GCounter`` and ``PNCounter`` have support for :ref:`delta_crdt_java` and don't need causal
+delivery of deltas.
Several related counters can be managed in a map with the ``PNCounterMap`` data type.
When the counters are placed in a ``PNCounterMap`` as opposed to placing them as separate top level
@@ -334,6 +338,8 @@ Merge is simply the union of the two sets.
.. includecode:: code/docs/ddata/DistributedDataDocTest.java#gset
+``GSet`` has support for :ref:`delta_crdt_java` and it doesn't require causal delivery of deltas.
+
If you need add and remove operations you should use the ``ORSet`` (observed-remove set).
Elements can be added and removed any number of times. If an element is concurrently added and
removed, the add will win. You cannot remove an element that you have not seen.
@@ -345,6 +351,8 @@ track causality of the operations and resolve concurrent updates.
.. includecode:: code/docs/ddata/DistributedDataDocTest.java#orset
+``ORSet`` has support for :ref:`delta_crdt_java` and it requires causal delivery of deltas.
+
Maps
----
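
The delta behaviour described in the updated delta-CRDT paragraph above can be seen
directly on ORSet. A minimal Scala sketch, assuming an implicit Cluster is in scope as in
the documentation samples:

    import akka.cluster.ddata.ORSet

    val s0 = (ORSet.empty[String] + "a" + "b").resetDelta
    val s1 = s0 + "c" + "d"

    // only the delta {'c', 'd'} travels to the other replicas
    s1.delta.get match {
      case d: ORSet.AddDeltaOp[String] @unchecked ⇒ println(d.underlying.elements) // Set(c, d)
      case other                                  ⇒ println(other)
    }

    // merging the delta on the receiving side rebuilds the full set
    s0.mergeDelta(s1.delta.get).elements // Set(a, b, c, d)
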
diff --git a/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
index ddb457a16e..369e51c48c 100644
--- a/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
@@ -4,7 +4,7 @@
package docs.serialization {
- import akka.actor.{ExtensionId, ExtensionIdProvider}
+ import akka.actor.{ ExtensionId, ExtensionIdProvider }
import akka.testkit._
//#imports
import akka.actor.{ ActorRef, ActorSystem }
diff --git a/akka-docs/rst/scala/distributed-data.rst b/akka-docs/rst/scala/distributed-data.rst
index fc462300af..cc1685a785 100644
--- a/akka-docs/rst/scala/distributed-data.rst
+++ b/akka-docs/rst/scala/distributed-data.rst
@@ -285,19 +285,22 @@ for updates. For example adding element ``'c'`` and ``'d'`` to set ``{'a', 'b'}`
result in sending the delta ``{'c', 'd'}`` and merge that with the state on the
receiving side, resulting in set ``{'a', 'b', 'c', 'd'}``.
-Current protocol for replicating the deltas does not support causal consistency.
-It is only eventually consistent. This means that if elements ``'c'`` and ``'d'`` are
+The protocol for replicating the deltas supports causal consistency if the data type
+is marked with ``RequiresCausalDeliveryOfDeltas``. Otherwise it is only eventually
+consistent. Without causal consistency, this means that if elements ``'c'`` and ``'d'`` are
added in two separate `Update` operations these deltas may occasionally be propagated
to nodes in different order than the causal order of the updates. For this example it
can result in the set ``{'a', 'b', 'd'}`` being seen before element 'c' is seen. Eventually
-it will be ``{'a', 'b', 'c', 'd'}``. If causal consistency is needed the delta propagation
-should be disabled with configuration property
-``akka.cluster.distributed-data.delta-crdt.enabled=off``.
+it will be ``{'a', 'b', 'c', 'd'}``.
Note that the full state is occasionally also replicated for delta-CRDTs, for example when
new nodes are added to the cluster or when deltas could not be propagated because
of network partitions or similar problems.
+The delta propagation can be disabled with the configuration property::
+
+ akka.cluster.distributed-data.delta-crdt.enabled=off
+
Data Types
==========
@@ -329,7 +332,8 @@ The value of the counter is the value of the P counter minus the value of the N
.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#pncounter
-``GCounter`` and ``PNCounter`` have support for :ref:`delta_crdt_scala`.
+``GCounter`` and ``PNCounter`` have support for :ref:`delta_crdt_scala` and don't need causal
+delivery of deltas.
Several related counters can be managed in a map with the ``PNCounterMap`` data type.
When the counters are placed in a ``PNCounterMap`` as opposed to placing them as separate top level
@@ -347,6 +351,8 @@ Merge is simply the union of the two sets.
.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#gset
+``GSet`` has support for :ref:`delta_crdt_scala` and it doesn't require causal delivery of deltas.
+
If you need add and remove operations you should use the ``ORSet`` (observed-remove set).
Elements can be added and removed any number of times. If an element is concurrently added and
removed, the add will win. You cannot remove an element that you have not seen.
@@ -358,6 +364,8 @@ track causality of the operations and resolve concurrent updates.
.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#orset
+``ORSet`` has support for :ref:`delta_crdt_scala` and it requires causal delivery of deltas.
+
Maps
----
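
A quick way to confirm which deltas opt in to causal delivery, mirroring the statements in
the updated paragraphs above (a sketch; the marker-trait checks show the expected outcome,
assuming an implicit Cluster is in scope for the ORSet update):

    import akka.cluster.ddata.{ GSet, ORSet, RequiresCausalDeliveryOfDeltas }

    val orsetDelta = (ORSet.empty[String] + "x").delta.get
    val gsetDelta = (GSet.empty[String] + "x").delta.get

    // ORSet deltas are marked for causal delivery, GSet deltas are not
    orsetDelta.isInstanceOf[RequiresCausalDeliveryOfDeltas] // expected: true
    gsetDelta.isInstanceOf[RequiresCausalDeliveryOfDeltas]  // expected: false
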
diff --git a/project/MiMa.scala b/project/MiMa.scala
index 64b2c0cf30..d1ed4759be 100644
--- a/project/MiMa.scala
+++ b/project/MiMa.scala
@@ -101,7 +101,15 @@ object MiMa extends AutoPlugin {
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GSet.this"), // constructor supplied by companion object
// #21875 delta-CRDT
- ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GCounter.this"),
+ ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.GCounter.this"),
+
+ // #22188 ORSet delta-CRDT
+ ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.ORSet.this"),
+ ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorToProto"),
+ ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorFromProto"),
+ ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.ddata.protobuf.SerializationSupport.versionVectorFromBinary"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.protobuf.ReplicatedDataSerializer.versionVectorToProto"),
+ ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.protobuf.ReplicatedDataSerializer.versionVectorFromProto"),
// #22141 sharding minCap
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.DDataShardCoordinator.updatingStateTimeout"),