From cbe5dd2cf5d30556126b89f0c37b837edd11f8d1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Sun, 17 May 2015 12:28:47 +0200 Subject: [PATCH] +cdd #16799 Add Distributed Data module Previously know as [patriknw/akka-data-replication](https://github.com/patriknw/akka-data-replication), which was originally inspired by [jboner/akka-crdt](https://github.com/jboner/akka-crdt). The functionality is very similar to akka-data-replication 0.11. Here is a list of the most important changes: * The package name changed to `akka.cluster.ddata` * The extension was renamed to `DistributedData` * The keys changed from strings to classes with unique identifiers and type information of the data values, e.g. `ORSetKey[Int]("set2")` * The optional read consistency parameter was removed from the `Update` message. If you need to read from other replicas before performing the update you have to first send a `Get` message and then continue with the ``Update`` when the ``GetSuccess`` is received. * `BigInt` is used in `GCounter` and `PNCounter` instead of `Long` * Improvements of java api * Better documentation --- akka-distributed-data/build.sbt | 14 + .../protobuf/msg/ReplicatedDataMessages.java | 12543 +++++++++++++ .../protobuf/msg/ReplicatorMessages.java | 14959 ++++++++++++++++ .../protobuf/ReplicatedDataMessages.proto | 92 + .../main/protobuf/ReplicatorMessages.proto | 118 + .../src/main/resources/reference.conf | 61 + .../akka/cluster/ddata/DistributedData.scala | 52 + .../main/scala/akka/cluster/ddata/Flag.scala | 45 + .../scala/akka/cluster/ddata/GCounter.scala | 131 + .../main/scala/akka/cluster/ddata/GSet.scala | 66 + .../main/scala/akka/cluster/ddata/Key.scala | 35 + .../scala/akka/cluster/ddata/LWWMap.scala | 154 + .../akka/cluster/ddata/LWWRegister.scala | 178 + .../main/scala/akka/cluster/ddata/ORMap.scala | 231 + .../main/scala/akka/cluster/ddata/ORSet.scala | 299 + .../scala/akka/cluster/ddata/PNCounter.scala | 134 + .../akka/cluster/ddata/PNCounterMap.scala | 149 
+ .../akka/cluster/ddata/PruningState.scala | 45 + .../akka/cluster/ddata/ReplicatedData.scala | 82 + .../scala/akka/cluster/ddata/Replicator.scala | 1467 ++ .../akka/cluster/ddata/VersionVector.scala | 226 + .../protobuf/ReplicatedDataSerializer.scala | 404 + .../ReplicatorMessageSerializer.scala | 326 + .../ddata/protobuf/SerializationSupport.scala | 144 + .../ddata/JepsenInspiredInsertSpec.scala | 283 + .../akka/cluster/ddata/PerformanceSpec.scala | 233 + .../cluster/ddata/ReplicatorChaosSpec.scala | 234 + .../cluster/ddata/ReplicatorPruningSpec.scala | 197 + .../akka/cluster/ddata/ReplicatorSpec.scala | 503 + .../akka/cluster/ddata/STMultiNodeSpec.scala | 20 + .../distributeddata/ReplicatedCacheSpec.scala | 187 + .../ReplicatedMetricsSpec.scala | 200 + .../ReplicatedServiceRegistrySpec.scala | 267 + .../ReplicatedShoppingCartSpec.scala | 214 + .../distributeddata/VotingContestSpec.scala | 184 + .../ddata/JavaImplOfReplicatedData.java | 29 + .../src/test/resources/reference.conf | 2 + .../scala/akka/cluster/ddata/FlagSpec.scala | 45 + .../akka/cluster/ddata/GCounterSpec.scala | 171 + .../scala/akka/cluster/ddata/GSetSpec.scala | 119 + .../scala/akka/cluster/ddata/LWWMapSpec.scala | 63 + .../akka/cluster/ddata/LWWRegisterSpec.scala | 92 + .../cluster/ddata/LocalConcurrencySpec.scala | 81 + .../scala/akka/cluster/ddata/ORMapSpec.scala | 205 + .../scala/akka/cluster/ddata/ORSetSpec.scala | 355 + .../akka/cluster/ddata/PNCounterMapSpec.scala | 62 + .../akka/cluster/ddata/PNCounterSpec.scala | 172 + .../akka/cluster/ddata/PruningStateSpec.scala | 46 + .../cluster/ddata/VersionVectorSpec.scala | 249 + .../ReplicatedDataSerializerSpec.scala | 173 + .../ReplicatorMessageSerializerSpec.scala | 81 + .../akka/cluster/ddata/sample/DataBot.scala | 98 + .../cluster/ddata/sample/LotsOfDataBot.scala | 137 + akka-docs/rst/general/configuration.rst | 32 + .../rst/java/code/docs/ddata/DataBot.java | 95 + .../docs/ddata/DistributedDataDocTest.java | 411 + 
akka-docs/rst/java/distributed-data.rst | 516 + akka-docs/rst/java/index-network.rst | 1 + .../docs/ddata/DistributedDataDocSpec.scala | 381 + .../scala/code/docs/ddata/TwoPhaseSet.scala | 29 + .../protobuf/TwoPhaseSetSerializer.scala | 75 + .../protobuf/TwoPhaseSetSerializer2.scala | 59 + akka-docs/rst/scala/distributed-data.rst | 504 + akka-docs/rst/scala/index-network.rst | 1 + .../protobuf/msg/TwoPhaseSetMessages.java | 1236 ++ .../main/protobuf/TwoPhaseSetMessages.proto | 24 + project/AkkaBuild.scala | 14 +- project/Dependencies.scala | 2 + project/OSGi.scala | 2 + 69 files changed, 40036 insertions(+), 3 deletions(-) create mode 100644 akka-distributed-data/build.sbt create mode 100644 akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatedDataMessages.java create mode 100644 akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatorMessages.java create mode 100644 akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto create mode 100644 akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto create mode 100644 akka-distributed-data/src/main/resources/reference.conf create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala create mode 100644 
akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala create mode 100644 akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedServiceRegistrySpec.scala create mode 100644 akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala create mode 100644 
akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/VotingContestSpec.scala create mode 100644 akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfReplicatedData.java create mode 100644 akka-distributed-data/src/test/resources/reference.conf create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/PruningStateSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/DataBot.scala create mode 100644 akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/LotsOfDataBot.scala create mode 100644 akka-docs/rst/java/code/docs/ddata/DataBot.java create mode 100644 akka-docs/rst/java/code/docs/ddata/DistributedDataDocTest.java create 
mode 100644 akka-docs/rst/java/distributed-data.rst create mode 100644 akka-docs/rst/scala/code/docs/ddata/DistributedDataDocSpec.scala create mode 100644 akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala create mode 100644 akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala create mode 100644 akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala create mode 100644 akka-docs/rst/scala/distributed-data.rst create mode 100644 akka-docs/src/main/java/docs/ddata/protobuf/msg/TwoPhaseSetMessages.java create mode 100644 akka-docs/src/main/protobuf/TwoPhaseSetMessages.proto diff --git a/akka-distributed-data/build.sbt b/akka-distributed-data/build.sbt new file mode 100644 index 0000000000..e065dffb63 --- /dev/null +++ b/akka-distributed-data/build.sbt @@ -0,0 +1,14 @@ +import akka.{ AkkaBuild, Dependencies, Formatting, MultiNode, Unidoc, OSGi } +import com.typesafe.tools.mima.plugin.MimaKeys + +AkkaBuild.defaultSettings + +Formatting.formatSettings + +OSGi.distributedData + +Dependencies.distributedData + +//MimaKeys.previousArtifact := akkaPreviousArtifact("akka-distributed-data").value + +enablePlugins(MultiNodeScalaTest) diff --git a/akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatedDataMessages.java b/akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatedDataMessages.java new file mode 100644 index 0000000000..a9d111c12a --- /dev/null +++ b/akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatedDataMessages.java @@ -0,0 +1,12543 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: ReplicatedDataMessages.proto + +package akka.cluster.ddata.protobuf.msg; + +public final class ReplicatedDataMessages { + private ReplicatedDataMessages() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface GSetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string stringElements = 1; + /** + * repeated string stringElements = 1; + */ + java.util.List + getStringElementsList(); + /** + * repeated string stringElements = 1; + */ + int getStringElementsCount(); + /** + * repeated string stringElements = 1; + */ + java.lang.String getStringElements(int index); + /** + * repeated string stringElements = 1; + */ + com.google.protobuf.ByteString + getStringElementsBytes(int index); + + // repeated sint32 intElements = 2 [packed = true]; + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + java.util.List getIntElementsList(); + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + int getIntElementsCount(); + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + int getIntElements(int index); + + // repeated sint64 longElements = 3 [packed = true]; + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + java.util.List getLongElementsList(); + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + int getLongElementsCount(); + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + long getLongElements(int index); + + // repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + java.util.List + getOtherElementsList(); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getOtherElements(int index); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + int getOtherElementsCount(); + /** + * repeated 
.akka.cluster.ddata.OtherMessage otherElements = 4; + */ + java.util.List + getOtherElementsOrBuilderList(); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getOtherElementsOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.GSet} + */ + public static final class GSet extends + com.google.protobuf.GeneratedMessage + implements GSetOrBuilder { + // Use GSet.newBuilder() to construct. + private GSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GSet defaultInstance; + public static GSet getDefaultInstance() { + return defaultInstance; + } + + public GSet getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GSet( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + stringElements_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + stringElements_.add(input.readBytes()); + break; + 
} + case 16: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + intElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + intElements_.add(input.readSInt32()); + break; + } + case 18: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) { + intElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + while (input.getBytesUntilLimit() > 0) { + intElements_.add(input.readSInt32()); + } + input.popLimit(limit); + break; + } + case 24: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + longElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + longElements_.add(input.readSInt64()); + break; + } + case 26: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { + longElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + while (input.getBytesUntilLimit() > 0) { + longElements_.add(input.readSInt64()); + } + input.popLimit(limit); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + otherElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + otherElements_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + stringElements_ = new com.google.protobuf.UnmodifiableLazyStringList(stringElements_); + } + if (((mutable_bitField0_ & 0x00000002) == 
0x00000002)) { + intElements_ = java.util.Collections.unmodifiableList(intElements_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + longElements_ = java.util.Collections.unmodifiableList(longElements_); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + otherElements_ = java.util.Collections.unmodifiableList(otherElements_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GSet(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string stringElements = 1; + public static final int STRINGELEMENTS_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList stringElements_; + /** + * repeated string stringElements = 1; + */ + public java.util.List + getStringElementsList() { + return stringElements_; + } + /** + * repeated string stringElements = 1; + */ + public int getStringElementsCount() { + return stringElements_.size(); + } + /** + * repeated string stringElements = 1; + */ 
+ public java.lang.String getStringElements(int index) { + return stringElements_.get(index); + } + /** + * repeated string stringElements = 1; + */ + public com.google.protobuf.ByteString + getStringElementsBytes(int index) { + return stringElements_.getByteString(index); + } + + // repeated sint32 intElements = 2 [packed = true]; + public static final int INTELEMENTS_FIELD_NUMBER = 2; + private java.util.List intElements_; + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public java.util.List + getIntElementsList() { + return intElements_; + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public int getIntElementsCount() { + return intElements_.size(); + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public int getIntElements(int index) { + return intElements_.get(index); + } + private int intElementsMemoizedSerializedSize = -1; + + // repeated sint64 longElements = 3 [packed = true]; + public static final int LONGELEMENTS_FIELD_NUMBER = 3; + private java.util.List longElements_; + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public java.util.List + getLongElementsList() { + return longElements_; + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public int getLongElementsCount() { + return longElements_.size(); + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public long getLongElements(int index) { + return longElements_.get(index); + } + private int longElementsMemoizedSerializedSize = -1; + + // repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + public static final int OTHERELEMENTS_FIELD_NUMBER = 4; + private java.util.List otherElements_; + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public java.util.List getOtherElementsList() { + return otherElements_; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public java.util.List + getOtherElementsOrBuilderList() { + 
return otherElements_; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public int getOtherElementsCount() { + return otherElements_.size(); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getOtherElements(int index) { + return otherElements_.get(index); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getOtherElementsOrBuilder( + int index) { + return otherElements_.get(index); + } + + private void initFields() { + stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + intElements_ = java.util.Collections.emptyList(); + longElements_ = java.util.Collections.emptyList(); + otherElements_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getOtherElementsCount(); i++) { + if (!getOtherElements(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < stringElements_.size(); i++) { + output.writeBytes(1, stringElements_.getByteString(i)); + } + if (getIntElementsList().size() > 0) { + output.writeRawVarint32(18); + output.writeRawVarint32(intElementsMemoizedSerializedSize); + } + for (int i = 0; i < intElements_.size(); i++) { + output.writeSInt32NoTag(intElements_.get(i)); + } + if (getLongElementsList().size() > 0) { + output.writeRawVarint32(26); + output.writeRawVarint32(longElementsMemoizedSerializedSize); + } + for (int i = 0; i < longElements_.size(); i++) { + 
output.writeSInt64NoTag(longElements_.get(i)); + } + for (int i = 0; i < otherElements_.size(); i++) { + output.writeMessage(4, otherElements_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < stringElements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(stringElements_.getByteString(i)); + } + size += dataSize; + size += 1 * getStringElementsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < intElements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeSInt32SizeNoTag(intElements_.get(i)); + } + size += dataSize; + if (!getIntElementsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + intElementsMemoizedSerializedSize = dataSize; + } + { + int dataSize = 0; + for (int i = 0; i < longElements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeSInt64SizeNoTag(longElements_.get(i)); + } + size += dataSize; + if (!getLongElementsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + longElementsMemoizedSerializedSize = dataSize; + } + for (int i = 0; i < otherElements_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, otherElements_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.GSet} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + 
} + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getOtherElementsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + intElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + longElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + if (otherElementsBuilder_ == null) { + otherElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + otherElementsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GSet_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + stringElements_ = new com.google.protobuf.UnmodifiableLazyStringList( + stringElements_); + bitField0_ = (bitField0_ & ~0x00000001); + } + 
result.stringElements_ = stringElements_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + intElements_ = java.util.Collections.unmodifiableList(intElements_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.intElements_ = intElements_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + longElements_ = java.util.Collections.unmodifiableList(longElements_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.longElements_ = longElements_; + if (otherElementsBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + otherElements_ = java.util.Collections.unmodifiableList(otherElements_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.otherElements_ = otherElements_; + } else { + result.otherElements_ = otherElementsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet.getDefaultInstance()) return this; + if (!other.stringElements_.isEmpty()) { + if (stringElements_.isEmpty()) { + stringElements_ = other.stringElements_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStringElementsIsMutable(); + stringElements_.addAll(other.stringElements_); + } + onChanged(); + } + if (!other.intElements_.isEmpty()) { + if (intElements_.isEmpty()) { + intElements_ = other.intElements_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureIntElementsIsMutable(); + intElements_.addAll(other.intElements_); + } + onChanged(); + } + if (!other.longElements_.isEmpty()) { + if (longElements_.isEmpty()) { + longElements_ = other.longElements_; + bitField0_ = 
(bitField0_ & ~0x00000004); + } else { + ensureLongElementsIsMutable(); + longElements_.addAll(other.longElements_); + } + onChanged(); + } + if (otherElementsBuilder_ == null) { + if (!other.otherElements_.isEmpty()) { + if (otherElements_.isEmpty()) { + otherElements_ = other.otherElements_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureOtherElementsIsMutable(); + otherElements_.addAll(other.otherElements_); + } + onChanged(); + } + } else { + if (!other.otherElements_.isEmpty()) { + if (otherElementsBuilder_.isEmpty()) { + otherElementsBuilder_.dispose(); + otherElementsBuilder_ = null; + otherElements_ = other.otherElements_; + bitField0_ = (bitField0_ & ~0x00000008); + otherElementsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getOtherElementsFieldBuilder() : null; + } else { + otherElementsBuilder_.addAllMessages(other.otherElements_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getOtherElementsCount(); i++) { + if (!getOtherElements(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GSet) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string stringElements = 1; + private com.google.protobuf.LazyStringList stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void 
ensureStringElementsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + stringElements_ = new com.google.protobuf.LazyStringArrayList(stringElements_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string stringElements = 1; + */ + public java.util.List + getStringElementsList() { + return java.util.Collections.unmodifiableList(stringElements_); + } + /** + * repeated string stringElements = 1; + */ + public int getStringElementsCount() { + return stringElements_.size(); + } + /** + * repeated string stringElements = 1; + */ + public java.lang.String getStringElements(int index) { + return stringElements_.get(index); + } + /** + * repeated string stringElements = 1; + */ + public com.google.protobuf.ByteString + getStringElementsBytes(int index) { + return stringElements_.getByteString(index); + } + /** + * repeated string stringElements = 1; + */ + public Builder setStringElements( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStringElementsIsMutable(); + stringElements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string stringElements = 1; + */ + public Builder addStringElements( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStringElementsIsMutable(); + stringElements_.add(value); + onChanged(); + return this; + } + /** + * repeated string stringElements = 1; + */ + public Builder addAllStringElements( + java.lang.Iterable values) { + ensureStringElementsIsMutable(); + super.addAll(values, stringElements_); + onChanged(); + return this; + } + /** + * repeated string stringElements = 1; + */ + public Builder clearStringElements() { + stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string stringElements = 1; + */ + public Builder addStringElementsBytes( + com.google.protobuf.ByteString 
value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStringElementsIsMutable(); + stringElements_.add(value); + onChanged(); + return this; + } + + // repeated sint32 intElements = 2 [packed = true]; + private java.util.List intElements_ = java.util.Collections.emptyList(); + private void ensureIntElementsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + intElements_ = new java.util.ArrayList(intElements_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public java.util.List + getIntElementsList() { + return java.util.Collections.unmodifiableList(intElements_); + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public int getIntElementsCount() { + return intElements_.size(); + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public int getIntElements(int index) { + return intElements_.get(index); + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public Builder setIntElements( + int index, int value) { + ensureIntElementsIsMutable(); + intElements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public Builder addIntElements(int value) { + ensureIntElementsIsMutable(); + intElements_.add(value); + onChanged(); + return this; + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public Builder addAllIntElements( + java.lang.Iterable values) { + ensureIntElementsIsMutable(); + super.addAll(values, intElements_); + onChanged(); + return this; + } + /** + * repeated sint32 intElements = 2 [packed = true]; + */ + public Builder clearIntElements() { + intElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + // repeated sint64 longElements = 3 [packed = true]; + private java.util.List longElements_ = java.util.Collections.emptyList(); + private void 
ensureLongElementsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + longElements_ = new java.util.ArrayList(longElements_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public java.util.List + getLongElementsList() { + return java.util.Collections.unmodifiableList(longElements_); + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public int getLongElementsCount() { + return longElements_.size(); + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public long getLongElements(int index) { + return longElements_.get(index); + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public Builder setLongElements( + int index, long value) { + ensureLongElementsIsMutable(); + longElements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public Builder addLongElements(long value) { + ensureLongElementsIsMutable(); + longElements_.add(value); + onChanged(); + return this; + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public Builder addAllLongElements( + java.lang.Iterable values) { + ensureLongElementsIsMutable(); + super.addAll(values, longElements_); + onChanged(); + return this; + } + /** + * repeated sint64 longElements = 3 [packed = true]; + */ + public Builder clearLongElements() { + longElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + // repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + private java.util.List otherElements_ = + java.util.Collections.emptyList(); + private void ensureOtherElementsIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + otherElements_ = new java.util.ArrayList(otherElements_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> otherElementsBuilder_; + + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public java.util.List getOtherElementsList() { + if (otherElementsBuilder_ == null) { + return java.util.Collections.unmodifiableList(otherElements_); + } else { + return otherElementsBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public int getOtherElementsCount() { + if (otherElementsBuilder_ == null) { + return otherElements_.size(); + } else { + return otherElementsBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getOtherElements(int index) { + if (otherElementsBuilder_ == null) { + return otherElements_.get(index); + } else { + return otherElementsBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder setOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (otherElementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOtherElementsIsMutable(); + otherElements_.set(index, value); + onChanged(); + } else { + otherElementsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder setOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.set(index, builderForValue.build()); + onChanged(); + } else { + otherElementsBuilder_.setMessage(index, 
builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder addOtherElements(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (otherElementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOtherElementsIsMutable(); + otherElements_.add(value); + onChanged(); + } else { + otherElementsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder addOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (otherElementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOtherElementsIsMutable(); + otherElements_.add(index, value); + onChanged(); + } else { + otherElementsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder addOtherElements( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.add(builderForValue.build()); + onChanged(); + } else { + otherElementsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder addOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.add(index, builderForValue.build()); + onChanged(); + } else { + otherElementsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder addAllOtherElements( + java.lang.Iterable values) { + if 
(otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + super.addAll(values, otherElements_); + onChanged(); + } else { + otherElementsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder clearOtherElements() { + if (otherElementsBuilder_ == null) { + otherElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + otherElementsBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public Builder removeOtherElements(int index) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.remove(index); + onChanged(); + } else { + otherElementsBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getOtherElementsBuilder( + int index) { + return getOtherElementsFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getOtherElementsOrBuilder( + int index) { + if (otherElementsBuilder_ == null) { + return otherElements_.get(index); } else { + return otherElementsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public java.util.List + getOtherElementsOrBuilderList() { + if (otherElementsBuilder_ != null) { + return otherElementsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(otherElements_); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder addOtherElementsBuilder() { + return 
getOtherElementsFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder addOtherElementsBuilder( + int index) { + return getOtherElementsFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 4; + */ + public java.util.List + getOtherElementsBuilderList() { + return getOtherElementsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getOtherElementsFieldBuilder() { + if (otherElementsBuilder_ == null) { + otherElementsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + otherElements_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + otherElements_ = null; + } + return otherElementsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.GSet) + } + + static { + defaultInstance = new GSet(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.GSet) + } + + public interface ORSetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.VersionVector vvector = 1; + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + boolean hasVvector(); + /** + * required 
.akka.cluster.ddata.VersionVector vvector = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getVvector(); + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder getVvectorOrBuilder(); + + // repeated .akka.cluster.ddata.VersionVector dots = 2; + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + java.util.List + getDotsList(); + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getDots(int index); + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + int getDotsCount(); + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + java.util.List + getDotsOrBuilderList(); + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder getDotsOrBuilder( + int index); + + // repeated string stringElements = 3; + /** + * repeated string stringElements = 3; + */ + java.util.List + getStringElementsList(); + /** + * repeated string stringElements = 3; + */ + int getStringElementsCount(); + /** + * repeated string stringElements = 3; + */ + java.lang.String getStringElements(int index); + /** + * repeated string stringElements = 3; + */ + com.google.protobuf.ByteString + getStringElementsBytes(int index); + + // repeated sint32 intElements = 4 [packed = true]; + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + java.util.List getIntElementsList(); + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + int getIntElementsCount(); + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + int getIntElements(int index); + + // repeated sint64 longElements = 5 [packed = true]; + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + java.util.List getLongElementsList(); + /** + * repeated sint64 
longElements = 5 [packed = true]; + */ + int getLongElementsCount(); + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + long getLongElements(int index); + + // repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + java.util.List + getOtherElementsList(); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getOtherElements(int index); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + int getOtherElementsCount(); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + java.util.List + getOtherElementsOrBuilderList(); + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getOtherElementsOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.ORSet} + */ + public static final class ORSet extends + com.google.protobuf.GeneratedMessage + implements ORSetOrBuilder { + // Use ORSet.newBuilder() to construct. 
+ private ORSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ORSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ORSet defaultInstance; + public static ORSet getDefaultInstance() { + return defaultInstance; + } + + public ORSet getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ORSet( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = vvector_.toBuilder(); + } + vvector_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(vvector_); + vvector_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + dots_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + dots_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.PARSER, 
extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + stringElements_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + stringElements_.add(input.readBytes()); + break; + } + case 32: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + intElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + intElements_.add(input.readSInt32()); + break; + } + case 34: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008) && input.getBytesUntilLimit() > 0) { + intElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + while (input.getBytesUntilLimit() > 0) { + intElements_.add(input.readSInt32()); + } + input.popLimit(limit); + break; + } + case 40: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + longElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + longElements_.add(input.readSInt64()); + break; + } + case 42: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010) && input.getBytesUntilLimit() > 0) { + longElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + while (input.getBytesUntilLimit() > 0) { + longElements_.add(input.readSInt64()); + } + input.popLimit(limit); + break; + } + case 50: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + otherElements_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000020; + } + otherElements_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( 
+ e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + dots_ = java.util.Collections.unmodifiableList(dots_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + stringElements_ = new com.google.protobuf.UnmodifiableLazyStringList(stringElements_); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + intElements_ = java.util.Collections.unmodifiableList(intElements_); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + longElements_ = java.util.Collections.unmodifiableList(longElements_); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + otherElements_ = java.util.Collections.unmodifiableList(otherElements_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ORSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ORSet(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.VersionVector vvector = 1; + 
public static final int VVECTOR_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector vvector_; + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public boolean hasVvector() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getVvector() { + return vvector_; + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder getVvectorOrBuilder() { + return vvector_; + } + + // repeated .akka.cluster.ddata.VersionVector dots = 2; + public static final int DOTS_FIELD_NUMBER = 2; + private java.util.List dots_; + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public java.util.List getDotsList() { + return dots_; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public java.util.List + getDotsOrBuilderList() { + return dots_; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public int getDotsCount() { + return dots_.size(); + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getDots(int index) { + return dots_.get(index); + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder getDotsOrBuilder( + int index) { + return dots_.get(index); + } + + // repeated string stringElements = 3; + public static final int STRINGELEMENTS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList stringElements_; + /** + * repeated string stringElements = 3; + */ + public java.util.List + getStringElementsList() { + return stringElements_; + } + /** + * repeated string stringElements = 3; + */ + public 
int getStringElementsCount() { + return stringElements_.size(); + } + /** + * repeated string stringElements = 3; + */ + public java.lang.String getStringElements(int index) { + return stringElements_.get(index); + } + /** + * repeated string stringElements = 3; + */ + public com.google.protobuf.ByteString + getStringElementsBytes(int index) { + return stringElements_.getByteString(index); + } + + // repeated sint32 intElements = 4 [packed = true]; + public static final int INTELEMENTS_FIELD_NUMBER = 4; + private java.util.List intElements_; + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public java.util.List + getIntElementsList() { + return intElements_; + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public int getIntElementsCount() { + return intElements_.size(); + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public int getIntElements(int index) { + return intElements_.get(index); + } + private int intElementsMemoizedSerializedSize = -1; + + // repeated sint64 longElements = 5 [packed = true]; + public static final int LONGELEMENTS_FIELD_NUMBER = 5; + private java.util.List longElements_; + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public java.util.List + getLongElementsList() { + return longElements_; + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public int getLongElementsCount() { + return longElements_.size(); + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public long getLongElements(int index) { + return longElements_.get(index); + } + private int longElementsMemoizedSerializedSize = -1; + + // repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + public static final int OTHERELEMENTS_FIELD_NUMBER = 6; + private java.util.List otherElements_; + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public java.util.List getOtherElementsList() { + return otherElements_; + } + /** + * repeated 
.akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public java.util.List + getOtherElementsOrBuilderList() { + return otherElements_; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public int getOtherElementsCount() { + return otherElements_.size(); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getOtherElements(int index) { + return otherElements_.get(index); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getOtherElementsOrBuilder( + int index) { + return otherElements_.get(index); + } + + private void initFields() { + vvector_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance(); + dots_ = java.util.Collections.emptyList(); + stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + intElements_ = java.util.Collections.emptyList(); + longElements_ = java.util.Collections.emptyList(); + otherElements_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVvector()) { + memoizedIsInitialized = 0; + return false; + } + if (!getVvector().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getDotsCount(); i++) { + if (!getDots(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getOtherElementsCount(); i++) { + if (!getOtherElements(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 
0x00000001) == 0x00000001)) { + output.writeMessage(1, vvector_); + } + for (int i = 0; i < dots_.size(); i++) { + output.writeMessage(2, dots_.get(i)); + } + for (int i = 0; i < stringElements_.size(); i++) { + output.writeBytes(3, stringElements_.getByteString(i)); + } + if (getIntElementsList().size() > 0) { + output.writeRawVarint32(34); + output.writeRawVarint32(intElementsMemoizedSerializedSize); + } + for (int i = 0; i < intElements_.size(); i++) { + output.writeSInt32NoTag(intElements_.get(i)); + } + if (getLongElementsList().size() > 0) { + output.writeRawVarint32(42); + output.writeRawVarint32(longElementsMemoizedSerializedSize); + } + for (int i = 0; i < longElements_.size(); i++) { + output.writeSInt64NoTag(longElements_.get(i)); + } + for (int i = 0; i < otherElements_.size(); i++) { + output.writeMessage(6, otherElements_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, vvector_); + } + for (int i = 0; i < dots_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, dots_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < stringElements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(stringElements_.getByteString(i)); + } + size += dataSize; + size += 1 * getStringElementsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < intElements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeSInt32SizeNoTag(intElements_.get(i)); + } + size += dataSize; + if (!getIntElementsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + intElementsMemoizedSerializedSize = dataSize; + } + { + 
int dataSize = 0; + for (int i = 0; i < longElements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeSInt64SizeNoTag(longElements_.get(i)); + } + size += dataSize; + if (!getLongElementsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(dataSize); + } + longElementsMemoizedSerializedSize = dataSize; + } + for (int i = 0; i < otherElements_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, otherElements_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom(java.io.InputStream input) + throws 
java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.ORSet} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + 
implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getVvectorFieldBuilder(); + getDotsFieldBuilder(); + getOtherElementsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (vvectorBuilder_ == null) { + vvector_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance(); + } else { + vvectorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (dotsBuilder_ == null) { + dots_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + dotsBuilder_.clear(); + } + stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + intElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + longElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & 
~0x00000010); + if (otherElementsBuilder_ == null) { + otherElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + otherElementsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORSet_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (vvectorBuilder_ == null) { + result.vvector_ = vvector_; + } else { + result.vvector_ = vvectorBuilder_.build(); + } + if (dotsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + dots_ = java.util.Collections.unmodifiableList(dots_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.dots_ = dots_; + } else { + result.dots_ = dotsBuilder_.build(); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + stringElements_ = new com.google.protobuf.UnmodifiableLazyStringList( + stringElements_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.stringElements_ = stringElements_; + if (((bitField0_ & 
0x00000008) == 0x00000008)) { + intElements_ = java.util.Collections.unmodifiableList(intElements_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.intElements_ = intElements_; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + longElements_ = java.util.Collections.unmodifiableList(longElements_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.longElements_ = longElements_; + if (otherElementsBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + otherElements_ = java.util.Collections.unmodifiableList(otherElements_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.otherElements_ = otherElements_; + } else { + result.otherElements_ = otherElementsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance()) return this; + if (other.hasVvector()) { + mergeVvector(other.getVvector()); + } + if (dotsBuilder_ == null) { + if (!other.dots_.isEmpty()) { + if (dots_.isEmpty()) { + dots_ = other.dots_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureDotsIsMutable(); + dots_.addAll(other.dots_); + } + onChanged(); + } + } else { + if (!other.dots_.isEmpty()) { + if (dotsBuilder_.isEmpty()) { + dotsBuilder_.dispose(); + dotsBuilder_ = null; + dots_ = other.dots_; + bitField0_ = (bitField0_ & ~0x00000002); + dotsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getDotsFieldBuilder() : null; + } else { + dotsBuilder_.addAllMessages(other.dots_); + } + } + } + if (!other.stringElements_.isEmpty()) { + if (stringElements_.isEmpty()) { + stringElements_ = other.stringElements_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureStringElementsIsMutable(); + stringElements_.addAll(other.stringElements_); + } + onChanged(); + } + if (!other.intElements_.isEmpty()) { + if (intElements_.isEmpty()) { + intElements_ = other.intElements_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureIntElementsIsMutable(); + intElements_.addAll(other.intElements_); + } + onChanged(); + } + if (!other.longElements_.isEmpty()) { + if (longElements_.isEmpty()) { + longElements_ = other.longElements_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureLongElementsIsMutable(); + longElements_.addAll(other.longElements_); + } + onChanged(); + } + if (otherElementsBuilder_ == null) { + if (!other.otherElements_.isEmpty()) { + if (otherElements_.isEmpty()) { + otherElements_ = other.otherElements_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureOtherElementsIsMutable(); + otherElements_.addAll(other.otherElements_); + } + onChanged(); + } + } else { + if (!other.otherElements_.isEmpty()) { + if (otherElementsBuilder_.isEmpty()) { + otherElementsBuilder_.dispose(); + otherElementsBuilder_ = null; + otherElements_ = other.otherElements_; + bitField0_ = (bitField0_ & ~0x00000020); + otherElementsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getOtherElementsFieldBuilder() : null; + } else { + otherElementsBuilder_.addAllMessages(other.otherElements_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVvector()) { + + return false; + } + if (!getVvector().isInitialized()) { + + return false; + } + for (int i = 0; i < getDotsCount(); i++) { + if (!getDots(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getOtherElementsCount(); i++) { + if (!getOtherElements(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.VersionVector vvector = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector vvector_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder> vvectorBuilder_; + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public boolean hasVvector() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.VersionVector 
vvector = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getVvector() { + if (vvectorBuilder_ == null) { + return vvector_; + } else { + return vvectorBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public Builder setVvector(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector value) { + if (vvectorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + vvector_ = value; + onChanged(); + } else { + vvectorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public Builder setVvector( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder builderForValue) { + if (vvectorBuilder_ == null) { + vvector_ = builderForValue.build(); + onChanged(); + } else { + vvectorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public Builder mergeVvector(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector value) { + if (vvectorBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + vvector_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance()) { + vvector_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.newBuilder(vvector_).mergeFrom(value).buildPartial(); + } else { + vvector_ = value; + } + onChanged(); + } else { + vvectorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public Builder clearVvector() { + if (vvectorBuilder_ == null) { + vvector_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance(); + onChanged(); + } else { + vvectorBuilder_.clear(); + } + bitField0_ = 
(bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder getVvectorBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getVvectorFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder getVvectorOrBuilder() { + if (vvectorBuilder_ != null) { + return vvectorBuilder_.getMessageOrBuilder(); + } else { + return vvector_; + } + } + /** + * required .akka.cluster.ddata.VersionVector vvector = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder> + getVvectorFieldBuilder() { + if (vvectorBuilder_ == null) { + vvectorBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder>( + vvector_, + getParentForChildren(), + isClean()); + vvector_ = null; + } + return vvectorBuilder_; + } + + // repeated .akka.cluster.ddata.VersionVector dots = 2; + private java.util.List dots_ = + java.util.Collections.emptyList(); + private void ensureDotsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + dots_ = new java.util.ArrayList(dots_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder, 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder> dotsBuilder_; + + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public java.util.List getDotsList() { + if (dotsBuilder_ == null) { + return java.util.Collections.unmodifiableList(dots_); + } else { + return dotsBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public int getDotsCount() { + if (dotsBuilder_ == null) { + return dots_.size(); + } else { + return dotsBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getDots(int index) { + if (dotsBuilder_ == null) { + return dots_.get(index); + } else { + return dotsBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder setDots( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector value) { + if (dotsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDotsIsMutable(); + dots_.set(index, value); + onChanged(); + } else { + dotsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder setDots( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder builderForValue) { + if (dotsBuilder_ == null) { + ensureDotsIsMutable(); + dots_.set(index, builderForValue.build()); + onChanged(); + } else { + dotsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder addDots(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector value) { + if (dotsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDotsIsMutable(); + dots_.add(value); + onChanged(); + } else 
{ + dotsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder addDots( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector value) { + if (dotsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDotsIsMutable(); + dots_.add(index, value); + onChanged(); + } else { + dotsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder addDots( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder builderForValue) { + if (dotsBuilder_ == null) { + ensureDotsIsMutable(); + dots_.add(builderForValue.build()); + onChanged(); + } else { + dotsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder addDots( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder builderForValue) { + if (dotsBuilder_ == null) { + ensureDotsIsMutable(); + dots_.add(index, builderForValue.build()); + onChanged(); + } else { + dotsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder addAllDots( + java.lang.Iterable values) { + if (dotsBuilder_ == null) { + ensureDotsIsMutable(); + super.addAll(values, dots_); + onChanged(); + } else { + dotsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder clearDots() { + if (dotsBuilder_ == null) { + dots_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + dotsBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public Builder removeDots(int index) { + if (dotsBuilder_ == 
null) { + ensureDotsIsMutable(); + dots_.remove(index); + onChanged(); + } else { + dotsBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder getDotsBuilder( + int index) { + return getDotsFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder getDotsOrBuilder( + int index) { + if (dotsBuilder_ == null) { + return dots_.get(index); } else { + return dotsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public java.util.List + getDotsOrBuilderList() { + if (dotsBuilder_ != null) { + return dotsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(dots_); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder addDotsBuilder() { + return getDotsFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder addDotsBuilder( + int index) { + return getDotsFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.VersionVector dots = 2; + */ + public java.util.List + getDotsBuilderList() { + return getDotsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder, 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder> + getDotsFieldBuilder() { + if (dotsBuilder_ == null) { + dotsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder>( + dots_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + dots_ = null; + } + return dotsBuilder_; + } + + // repeated string stringElements = 3; + private com.google.protobuf.LazyStringList stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureStringElementsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + stringElements_ = new com.google.protobuf.LazyStringArrayList(stringElements_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated string stringElements = 3; + */ + public java.util.List + getStringElementsList() { + return java.util.Collections.unmodifiableList(stringElements_); + } + /** + * repeated string stringElements = 3; + */ + public int getStringElementsCount() { + return stringElements_.size(); + } + /** + * repeated string stringElements = 3; + */ + public java.lang.String getStringElements(int index) { + return stringElements_.get(index); + } + /** + * repeated string stringElements = 3; + */ + public com.google.protobuf.ByteString + getStringElementsBytes(int index) { + return stringElements_.getByteString(index); + } + /** + * repeated string stringElements = 3; + */ + public Builder setStringElements( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStringElementsIsMutable(); + stringElements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string stringElements = 3; + */ + public Builder addStringElements( + java.lang.String value) { + if (value == 
null) { + throw new NullPointerException(); + } + ensureStringElementsIsMutable(); + stringElements_.add(value); + onChanged(); + return this; + } + /** + * repeated string stringElements = 3; + */ + public Builder addAllStringElements( + java.lang.Iterable values) { + ensureStringElementsIsMutable(); + super.addAll(values, stringElements_); + onChanged(); + return this; + } + /** + * repeated string stringElements = 3; + */ + public Builder clearStringElements() { + stringElements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * repeated string stringElements = 3; + */ + public Builder addStringElementsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStringElementsIsMutable(); + stringElements_.add(value); + onChanged(); + return this; + } + + // repeated sint32 intElements = 4 [packed = true]; + private java.util.List intElements_ = java.util.Collections.emptyList(); + private void ensureIntElementsIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + intElements_ = new java.util.ArrayList(intElements_); + bitField0_ |= 0x00000008; + } + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public java.util.List + getIntElementsList() { + return java.util.Collections.unmodifiableList(intElements_); + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public int getIntElementsCount() { + return intElements_.size(); + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public int getIntElements(int index) { + return intElements_.get(index); + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public Builder setIntElements( + int index, int value) { + ensureIntElementsIsMutable(); + intElements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public Builder 
addIntElements(int value) { + ensureIntElementsIsMutable(); + intElements_.add(value); + onChanged(); + return this; + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public Builder addAllIntElements( + java.lang.Iterable values) { + ensureIntElementsIsMutable(); + super.addAll(values, intElements_); + onChanged(); + return this; + } + /** + * repeated sint32 intElements = 4 [packed = true]; + */ + public Builder clearIntElements() { + intElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + // repeated sint64 longElements = 5 [packed = true]; + private java.util.List longElements_ = java.util.Collections.emptyList(); + private void ensureLongElementsIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + longElements_ = new java.util.ArrayList(longElements_); + bitField0_ |= 0x00000010; + } + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public java.util.List + getLongElementsList() { + return java.util.Collections.unmodifiableList(longElements_); + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public int getLongElementsCount() { + return longElements_.size(); + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public long getLongElements(int index) { + return longElements_.get(index); + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public Builder setLongElements( + int index, long value) { + ensureLongElementsIsMutable(); + longElements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public Builder addLongElements(long value) { + ensureLongElementsIsMutable(); + longElements_.add(value); + onChanged(); + return this; + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public Builder addAllLongElements( + java.lang.Iterable values) { + ensureLongElementsIsMutable(); + 
super.addAll(values, longElements_); + onChanged(); + return this; + } + /** + * repeated sint64 longElements = 5 [packed = true]; + */ + public Builder clearLongElements() { + longElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + // repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + private java.util.List otherElements_ = + java.util.Collections.emptyList(); + private void ensureOtherElementsIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + otherElements_ = new java.util.ArrayList(otherElements_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> otherElementsBuilder_; + + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public java.util.List getOtherElementsList() { + if (otherElementsBuilder_ == null) { + return java.util.Collections.unmodifiableList(otherElements_); + } else { + return otherElementsBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public int getOtherElementsCount() { + if (otherElementsBuilder_ == null) { + return otherElements_.size(); + } else { + return otherElementsBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getOtherElements(int index) { + if (otherElementsBuilder_ == null) { + return otherElements_.get(index); + } else { + return otherElementsBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder setOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage 
value) { + if (otherElementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOtherElementsIsMutable(); + otherElements_.set(index, value); + onChanged(); + } else { + otherElementsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder setOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.set(index, builderForValue.build()); + onChanged(); + } else { + otherElementsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder addOtherElements(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (otherElementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOtherElementsIsMutable(); + otherElements_.add(value); + onChanged(); + } else { + otherElementsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder addOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (otherElementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOtherElementsIsMutable(); + otherElements_.add(index, value); + onChanged(); + } else { + otherElementsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder addOtherElements( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.add(builderForValue.build()); + onChanged(); + } else { + 
otherElementsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder addOtherElements( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.add(index, builderForValue.build()); + onChanged(); + } else { + otherElementsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder addAllOtherElements( + java.lang.Iterable values) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + super.addAll(values, otherElements_); + onChanged(); + } else { + otherElementsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder clearOtherElements() { + if (otherElementsBuilder_ == null) { + otherElements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + otherElementsBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public Builder removeOtherElements(int index) { + if (otherElementsBuilder_ == null) { + ensureOtherElementsIsMutable(); + otherElements_.remove(index); + onChanged(); + } else { + otherElementsBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getOtherElementsBuilder( + int index) { + return getOtherElementsFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getOtherElementsOrBuilder( + int 
index) { + if (otherElementsBuilder_ == null) { + return otherElements_.get(index); } else { + return otherElementsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public java.util.List + getOtherElementsOrBuilderList() { + if (otherElementsBuilder_ != null) { + return otherElementsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(otherElements_); + } + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder addOtherElementsBuilder() { + return getOtherElementsFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder addOtherElementsBuilder( + int index) { + return getOtherElementsFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.OtherMessage otherElements = 6; + */ + public java.util.List + getOtherElementsBuilderList() { + return getOtherElementsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getOtherElementsFieldBuilder() { + if (otherElementsBuilder_ == null) { + otherElementsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + otherElements_, + 
((bitField0_ & 0x00000020) == 0x00000020), + getParentForChildren(), + isClean()); + otherElements_ = null; + } + return otherElementsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.ORSet) + } + + static { + defaultInstance = new ORSet(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.ORSet) + } + + public interface FlagOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool enabled = 1; + /** + * required bool enabled = 1; + */ + boolean hasEnabled(); + /** + * required bool enabled = 1; + */ + boolean getEnabled(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Flag} + */ + public static final class Flag extends + com.google.protobuf.GeneratedMessage + implements FlagOrBuilder { + // Use Flag.newBuilder() to construct. + private Flag(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Flag(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Flag defaultInstance; + public static Flag getDefaultInstance() { + return defaultInstance; + } + + public Flag getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Flag( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if 
(!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + enabled_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_Flag_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_Flag_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Flag parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Flag(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool enabled = 1; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_; + /** + * required bool enabled = 1; + */ + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool enabled = 1; + */ + public boolean getEnabled() { + return enabled_; + } + + private 
void initFields() { + enabled_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEnabled()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, enabled_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, enabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder 
toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Flag} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.FlagOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_Flag_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_Flag_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + enabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_Flag_descriptor; + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.enabled_ = enabled_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag.getDefaultInstance()) return this; + if (other.hasEnabled()) { + setEnabled(other.getEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEnabled()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag parsedMessage = null; + try { + parsedMessage = 
PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.Flag) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool enabled = 1; + private boolean enabled_ ; + /** + * required bool enabled = 1; + */ + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool enabled = 1; + */ + public boolean getEnabled() { + return enabled_; + } + /** + * required bool enabled = 1; + */ + public Builder setEnabled(boolean value) { + bitField0_ |= 0x00000001; + enabled_ = value; + onChanged(); + return this; + } + /** + * required bool enabled = 1; + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Flag) + } + + static { + defaultInstance = new Flag(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Flag) + } + + public interface LWWRegisterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required sint64 timestamp = 1; + /** + * required sint64 timestamp = 1; + */ + boolean hasTimestamp(); + /** + * required sint64 timestamp = 1; + */ + long getTimestamp(); + + // required .akka.cluster.ddata.UniqueAddress node = 2; + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + boolean hasNode(); + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode(); + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder(); + + // required 
.akka.cluster.ddata.OtherMessage state = 3; + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + boolean hasState(); + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getState(); + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getStateOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.LWWRegister} + */ + public static final class LWWRegister extends + com.google.protobuf.GeneratedMessage + implements LWWRegisterOrBuilder { + // Use LWWRegister.newBuilder() to construct. + private LWWRegister(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private LWWRegister(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final LWWRegister defaultInstance; + public static LWWRegister getDefaultInstance() { + return defaultInstance; + } + + public LWWRegister getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LWWRegister( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + 
bitField0_ |= 0x00000001; + timestamp_ = input.readSInt64(); + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = node_.toBuilder(); + } + node_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(node_); + node_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = state_.toBuilder(); + } + state_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(state_); + state_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWRegister_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWRegister_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder.class); + } + + 
public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public LWWRegister parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LWWRegister(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required sint64 timestamp = 1; + public static final int TIMESTAMP_FIELD_NUMBER = 1; + private long timestamp_; + /** + * required sint64 timestamp = 1; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required sint64 timestamp = 1; + */ + public long getTimestamp() { + return timestamp_; + } + + // required .akka.cluster.ddata.UniqueAddress node = 2; + public static final int NODE_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_; + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public boolean hasNode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() { + return node_; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() { + return node_; + } + + // required .akka.cluster.ddata.OtherMessage state = 3; + public static final int STATE_FIELD_NUMBER = 3; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage state_; + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getState() { + return state_; + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getStateOrBuilder() { + return state_; + } + + private void initFields() { + timestamp_ = 0L; + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + state_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!getNode().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getState().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeSInt64(1, timestamp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, node_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, state_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(1, timestamp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + 
.computeMessageSize(2, node_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, state_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.LWWRegister} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWRegister_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWRegister_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNodeFieldBuilder(); + getStateFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (nodeBuilder_ == null) { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + } else { + nodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (stateBuilder_ == null) { + state_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + stateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWRegister_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister getDefaultInstanceForType() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.timestamp_ = timestamp_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (nodeBuilder_ == null) { + result.node_ = node_; + } else { + result.node_ = nodeBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (stateBuilder_ == null) { + result.state_ = state_; + } else { + result.state_ = stateBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance()) return this; + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); + } + if (other.hasNode()) { + mergeNode(other.getNode()); + } + if (other.hasState()) { + 
mergeState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTimestamp()) { + + return false; + } + if (!hasNode()) { + + return false; + } + if (!hasState()) { + + return false; + } + if (!getNode().isInitialized()) { + + return false; + } + if (!getState().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required sint64 timestamp = 1; + private long timestamp_ ; + /** + * required sint64 timestamp = 1; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required sint64 timestamp = 1; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * required sint64 timestamp = 1; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000001; + timestamp_ = value; + onChanged(); + return this; + } + /** + * required sint64 timestamp = 1; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + timestamp_ = 0L; + onChanged(); + return this; + } + + // required .akka.cluster.ddata.UniqueAddress node = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + private 
com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> nodeBuilder_; + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public boolean hasNode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() { + if (nodeBuilder_ == null) { + return node_; + } else { + return nodeBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public Builder setNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + node_ = value; + onChanged(); + } else { + nodeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public Builder setNode( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) { + if (nodeBuilder_ == null) { + node_ = builderForValue.build(); + onChanged(); + } else { + nodeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public Builder mergeNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (nodeBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + node_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) { + node_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(node_).mergeFrom(value).buildPartial(); + } else { + node_ = value; + } + onChanged(); + } else { + 
nodeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public Builder clearNode() { + if (nodeBuilder_ == null) { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + onChanged(); + } else { + nodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getNodeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getNodeFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() { + if (nodeBuilder_ != null) { + return nodeBuilder_.getMessageOrBuilder(); + } else { + return node_; + } + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> + getNodeFieldBuilder() { + if (nodeBuilder_ == null) { + nodeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>( + node_, + getParentForChildren(), + isClean()); + node_ = null; + } + return nodeBuilder_; + } + + // required .akka.cluster.ddata.OtherMessage state = 3; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage state_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private 
com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> stateBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getState() { + if (stateBuilder_ == null) { + return state_; + } else { + return stateBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public Builder setState(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (stateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + state_ = value; + onChanged(); + } else { + stateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public Builder setState( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (stateBuilder_ == null) { + state_ = builderForValue.build(); + onChanged(); + } else { + stateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public Builder mergeState(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (stateBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + state_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + state_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(state_).mergeFrom(value).buildPartial(); + } else { + state_ = value; + } + onChanged(); + } else { + 
stateBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public Builder clearState() { + if (stateBuilder_ == null) { + state_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + stateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getStateBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getStateFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getStateOrBuilder() { + if (stateBuilder_ != null) { + return stateBuilder_.getMessageOrBuilder(); + } else { + return state_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage state = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getStateFieldBuilder() { + if (stateBuilder_ == null) { + stateBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + state_, + getParentForChildren(), + isClean()); + state_ = null; + } + return stateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.LWWRegister) + } + + static { + defaultInstance = new LWWRegister(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.LWWRegister) + } + + 
public interface GCounterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + java.util.List + getEntriesList(); + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + java.util.List + getEntriesOrBuilderList(); + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.GCounter} + */ + public static final class GCounter extends + com.google.protobuf.GeneratedMessage + implements GCounterOrBuilder { + // Use GCounter.newBuilder() to construct. 
+ private GCounter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GCounter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GCounter defaultInstance; + public static GCounter getDefaultInstance() { + return defaultInstance; + } + + public GCounter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GCounter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = 
unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GCounter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GCounter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.UniqueAddress node = 1; + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + boolean hasNode(); + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode(); + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder(); + + // required bytes value = 2; + /** + * required bytes value = 2; + */ + boolean hasValue(); + /** + * required bytes value = 2; + */ + com.google.protobuf.ByteString getValue(); + } + /** + * Protobuf type {@code akka.cluster.ddata.GCounter.Entry} + */ + public static final class Entry 
extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = node_.toBuilder(); + } + node_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(node_); + node_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + value_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + 
throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.UniqueAddress node = 1; + public static final int NODE_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_; + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() { + return node_; + } + /** + * required .akka.cluster.ddata.UniqueAddress node 
= 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() { + return node_; + } + + // required bytes value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString value_; + /** + * required bytes value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes value = 2; + */ + public com.google.protobuf.ByteString getValue() { + return value_; + } + + private void initFields() { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + value_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + if (!getNode().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, node_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, node_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = 
size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.GCounter.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNodeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (nodeBuilder_ == null) { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + } else { + nodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + value_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); 
+ } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (nodeBuilder_ == null) { + result.node_ = node_; + } else { + result.node_ = nodeBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.getDefaultInstance()) return this; + if (other.hasNode()) { + mergeNode(other.getNode()); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNode()) { + + return false; + } + if (!hasValue()) { + + return false; + } + if (!getNode().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.UniqueAddress node = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> nodeBuilder_; + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() { + if (nodeBuilder_ == null) { + return node_; + } else { + return nodeBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder setNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + node_ = value; + onChanged(); + } else { + nodeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder setNode( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) { + if (nodeBuilder_ == null) { + node_ = builderForValue.build(); + onChanged(); + } else { + nodeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 
0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder mergeNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (nodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + node_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) { + node_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(node_).mergeFrom(value).buildPartial(); + } else { + node_ = value; + } + onChanged(); + } else { + nodeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder clearNode() { + if (nodeBuilder_ == null) { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + onChanged(); + } else { + nodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getNodeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getNodeFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() { + if (nodeBuilder_ != null) { + return nodeBuilder_.getMessageOrBuilder(); + } else { + return node_; + } + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> + getNodeFieldBuilder() { + if (nodeBuilder_ == null) { + nodeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>( + node_, + getParentForChildren(), + isClean()); + node_ = null; + } + return nodeBuilder_; + } + + // required bytes value = 2; + private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes value = 2; + */ + public com.google.protobuf.ByteString getValue() { + return value_; + } + /** + * required bytes value = 2; + */ + public Builder setValue(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * required bytes value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.GCounter.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.GCounter.Entry) + } + + // repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + public static final int ENTRIES_FIELD_NUMBER = 1; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry 
entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry getEntries(int index) { + return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(1, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, entries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.GCounter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + 
private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + entriesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_GCounter_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter(this); + int from_bitField0_ = bitField0_; + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.entries_ = entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter) { + return 
mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance()) return this; + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000001); + entriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + private 
java.util.List entries_ = + java.util.Collections.emptyList(); + private void ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder builderForValue) { + if 
(entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder 
addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder clearEntries() { + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder getEntriesBuilder( + int index) { + return getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.GCounter.Entry entries = 1; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.GCounter) + } + + static { + defaultInstance = new GCounter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.GCounter) + } + + public interface PNCounterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.GCounter increments = 1; + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + boolean hasIncrements(); + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getIncrements(); + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder getIncrementsOrBuilder(); + + // required .akka.cluster.ddata.GCounter decrements = 2; + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + boolean hasDecrements(); + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getDecrements(); + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder getDecrementsOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.PNCounter} + */ + public static final class PNCounter extends + com.google.protobuf.GeneratedMessage + implements PNCounterOrBuilder { + // Use PNCounter.newBuilder() to construct. + private PNCounter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PNCounter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PNCounter defaultInstance; + public static PNCounter getDefaultInstance() { + return defaultInstance; + } + + public PNCounter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PNCounter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + 
try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = increments_.toBuilder(); + } + increments_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(increments_); + increments_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = decrements_.toBuilder(); + } + decrements_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(decrements_); + decrements_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PNCounter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PNCounter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.GCounter increments = 1; + public static final int INCREMENTS_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter increments_; + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public boolean hasIncrements() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getIncrements() { + return increments_; + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder getIncrementsOrBuilder() { + return increments_; + } + + // required .akka.cluster.ddata.GCounter decrements = 2; + public static final int DECREMENTS_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter decrements_; + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public boolean hasDecrements() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + 
public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getDecrements() { + return decrements_; + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder getDecrementsOrBuilder() { + return decrements_; + } + + private void initFields() { + increments_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + decrements_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasIncrements()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDecrements()) { + memoizedIsInitialized = 0; + return false; + } + if (!getIncrements().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getDecrements().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, increments_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, decrements_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, increments_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, decrements_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize 
= size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.PNCounter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounter_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getIncrementsFieldBuilder(); + getDecrementsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (incrementsBuilder_ == null) { + increments_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + } else { + incrementsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (decrementsBuilder_ == null) { + decrements_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + } else { + decrementsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounter_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (incrementsBuilder_ == null) { + result.increments_ = increments_; + } else { + result.increments_ = incrementsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (decrementsBuilder_ == null) { + result.decrements_ = decrements_; + } else { + result.decrements_ = decrementsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance()) return this; + if (other.hasIncrements()) { + mergeIncrements(other.getIncrements()); + } + if (other.hasDecrements()) { + mergeDecrements(other.getDecrements()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasIncrements()) { + + return false; + } + if (!hasDecrements()) { + + return false; + } + if (!getIncrements().isInitialized()) { + + return false; + } + if (!getDecrements().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.GCounter increments = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter increments_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder> incrementsBuilder_; + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public boolean hasIncrements() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getIncrements() { + if (incrementsBuilder_ == null) { + return increments_; + } else { + return incrementsBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public Builder setIncrements(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter value) { + if (incrementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + increments_ = value; + onChanged(); + } else { + incrementsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * 
required .akka.cluster.ddata.GCounter increments = 1; + */ + public Builder setIncrements( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder builderForValue) { + if (incrementsBuilder_ == null) { + increments_ = builderForValue.build(); + onChanged(); + } else { + incrementsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public Builder mergeIncrements(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter value) { + if (incrementsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + increments_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance()) { + increments_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.newBuilder(increments_).mergeFrom(value).buildPartial(); + } else { + increments_ = value; + } + onChanged(); + } else { + incrementsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public Builder clearIncrements() { + if (incrementsBuilder_ == null) { + increments_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + onChanged(); + } else { + incrementsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder getIncrementsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getIncrementsFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder getIncrementsOrBuilder() { + if (incrementsBuilder_ != null) { + return incrementsBuilder_.getMessageOrBuilder(); + } else { + return increments_; + } + 
} + /** + * required .akka.cluster.ddata.GCounter increments = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder> + getIncrementsFieldBuilder() { + if (incrementsBuilder_ == null) { + incrementsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder>( + increments_, + getParentForChildren(), + isClean()); + increments_ = null; + } + return incrementsBuilder_; + } + + // required .akka.cluster.ddata.GCounter decrements = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter decrements_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder> decrementsBuilder_; + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public boolean hasDecrements() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter getDecrements() { + if (decrementsBuilder_ == null) { + return decrements_; + } else { + return decrementsBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public Builder setDecrements(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter value) { + if (decrementsBuilder_ == null) { + if (value == null) { + throw 
new NullPointerException(); + } + decrements_ = value; + onChanged(); + } else { + decrementsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public Builder setDecrements( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder builderForValue) { + if (decrementsBuilder_ == null) { + decrements_ = builderForValue.build(); + onChanged(); + } else { + decrementsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public Builder mergeDecrements(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter value) { + if (decrementsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + decrements_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance()) { + decrements_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.newBuilder(decrements_).mergeFrom(value).buildPartial(); + } else { + decrements_ = value; + } + onChanged(); + } else { + decrementsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public Builder clearDecrements() { + if (decrementsBuilder_ == null) { + decrements_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.getDefaultInstance(); + onChanged(); + } else { + decrementsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder getDecrementsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getDecrementsFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder getDecrementsOrBuilder() { + if (decrementsBuilder_ != null) { + return decrementsBuilder_.getMessageOrBuilder(); + } else { + return decrements_; + } + } + /** + * required .akka.cluster.ddata.GCounter decrements = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder> + getDecrementsFieldBuilder() { + if (decrementsBuilder_ == null) { + decrementsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.GCounterOrBuilder>( + decrements_, + getParentForChildren(), + isClean()); + decrements_ = null; + } + return decrementsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.PNCounter) + } + + static { + defaultInstance = new PNCounter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.PNCounter) + } + + public interface VersionVectorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + java.util.List + getEntriesList(); + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + java.util.List + getEntriesOrBuilderList(); + /** + * repeated 
.akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.VersionVector} + */ + public static final class VersionVector extends + com.google.protobuf.GeneratedMessage + implements VersionVectorOrBuilder { + // Use VersionVector.newBuilder() to construct. + private VersionVector(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private VersionVector(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final VersionVector defaultInstance; + public static VersionVector getDefaultInstance() { + return defaultInstance; + } + + public VersionVector getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private VersionVector( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + 
entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public VersionVector parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new VersionVector(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.UniqueAddress node = 1; + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + boolean hasNode(); + 
/** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode(); + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder(); + + // required int64 version = 2; + /** + * required int64 version = 2; + */ + boolean hasVersion(); + /** + * required int64 version = 2; + */ + long getVersion(); + } + /** + * Protobuf type {@code akka.cluster.ddata.VersionVector.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = node_.toBuilder(); + } + node_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(node_); + node_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + version_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.UniqueAddress node = 1; + public static final int NODE_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_; + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() { + return node_; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() { + return node_; + } + + // required int64 version = 2; + public static final int VERSION_FIELD_NUMBER = 2; + private long version_; + /** + * required int64 version = 2; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 version = 2; + */ + public long getVersion() { + return version_; + } + + private void initFields() { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + version_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getNode().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, node_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + 
output.writeInt64(2, version_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, node_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, version_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code 
akka.cluster.ddata.VersionVector.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNodeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (nodeBuilder_ == null) { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + } else { + nodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + version_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (nodeBuilder_ == null) { + result.node_ = node_; + } else { + result.node_ = nodeBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.version_ = version_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.getDefaultInstance()) return this; + if (other.hasNode()) { + 
mergeNode(other.getNode()); + } + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNode()) { + + return false; + } + if (!hasVersion()) { + + return false; + } + if (!getNode().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.UniqueAddress node = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> nodeBuilder_; + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public boolean hasNode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getNode() { + if (nodeBuilder_ == null) { + return node_; + } else { + return nodeBuilder_.getMessage(); + } + } + /** + * required 
.akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder setNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + node_ = value; + onChanged(); + } else { + nodeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder setNode( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) { + if (nodeBuilder_ == null) { + node_ = builderForValue.build(); + onChanged(); + } else { + nodeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder mergeNode(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (nodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + node_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) { + node_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(node_).mergeFrom(value).buildPartial(); + } else { + node_ = value; + } + onChanged(); + } else { + nodeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public Builder clearNode() { + if (nodeBuilder_ == null) { + node_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + onChanged(); + } else { + nodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getNodeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getNodeFieldBuilder().getBuilder(); + } + /** + * required 
.akka.cluster.ddata.UniqueAddress node = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getNodeOrBuilder() { + if (nodeBuilder_ != null) { + return nodeBuilder_.getMessageOrBuilder(); + } else { + return node_; + } + } + /** + * required .akka.cluster.ddata.UniqueAddress node = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> + getNodeFieldBuilder() { + if (nodeBuilder_ == null) { + nodeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>( + node_, + getParentForChildren(), + isClean()); + node_ = null; + } + return nodeBuilder_; + } + + // required int64 version = 2; + private long version_ ; + /** + * required int64 version = 2; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 version = 2; + */ + public long getVersion() { + return version_; + } + /** + * required int64 version = 2; + */ + public Builder setVersion(long value) { + bitField0_ |= 0x00000002; + version_ = value; + onChanged(); + return this; + } + /** + * required int64 version = 2; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000002); + version_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.VersionVector.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.VersionVector.Entry) + } + + // repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + public 
static final int ENTRIES_FIELD_NUMBER = 1; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry getEntries(int index) { + return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(1, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, entries_.get(i)); + } + 
size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.VersionVector} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVectorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + entriesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_VersionVector_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector buildPartial() { + 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector(this); + int from_bitField0_ = bitField0_; + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.entries_ = entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.getDefaultInstance()) return this; + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000001); + entriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + private java.util.List entries_ = + java.util.Collections.emptyList(); + private void ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 
1; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry value) { + if 
(entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder clearEntries() { + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + 
entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder getEntriesBuilder( + int index) { + return getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.VersionVector.Entry entries = 1; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry, 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.VersionVector.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.VersionVector) + } + + static { + defaultInstance = new VersionVector(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.VersionVector) + } + + public interface ORMapOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.ORSet keys = 1; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + boolean hasKeys(); + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys(); + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder(); + + // repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + java.util.List + getEntriesList(); + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + java.util.List + getEntriesOrBuilderList(); + 
/** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.ORMap} + */ + public static final class ORMap extends + com.google.protobuf.GeneratedMessage + implements ORMapOrBuilder { + // Use ORMap.newBuilder() to construct. + private ORMap(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ORMap(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ORMap defaultInstance; + public static ORMap getDefaultInstance() { + return defaultInstance; + } + + public ORMap getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ORMap( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = keys_.toBuilder(); + } + keys_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.PARSER, extensionRegistry); + if (subBuilder != null) { + 
subBuilder.mergeFrom(keys_); + keys_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ORMap parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ORMap(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface 
EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required .akka.cluster.ddata.OtherMessage value = 2; + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + boolean hasValue(); + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getValue(); + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getValueOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.ORMap.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + 
com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = value_.toBuilder(); + } + value_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(value_); + value_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .akka.cluster.ddata.OtherMessage value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value_; + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getValue() { + return value_; + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getValueOrBuilder() { + return value_; + } + + private void initFields() { + key_ = ""; + value_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + if (!getValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom( + com.google.protobuf.CodedInputStream 
input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.ORMap.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (valueBuilder_ == null) { + value_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (valueBuilder_ == null) 
{ + result.value_ = value_; + } else { + result.value_ = valueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasValue()) { + + return false; + } + if (!getValue().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string 
key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required .akka.cluster.ddata.OtherMessage value = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> valueBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required 
.akka.cluster.ddata.OtherMessage value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getValue() { + if (valueBuilder_ == null) { + return value_; + } else { + return valueBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public Builder setValue(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + onChanged(); + } else { + valueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public Builder setValue( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + onChanged(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public Builder mergeValue(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + value_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + value_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(value_).mergeFrom(value).buildPartial(); + } else { + value_ = value; + } + onChanged(); + } else { + valueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + value_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * 
required .akka.cluster.ddata.OtherMessage value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getValueFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + value_, + getParentForChildren(), + isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.ORMap.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.ORMap.Entry) + } + + private int bitField0_; + // required .akka.cluster.ddata.ORSet keys = 1; + public static final int KEYS_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet keys_; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys() { + return keys_; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder() { + return keys_; + } + + // repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + public static final int ENTRIES_FIELD_NUMBER = 2; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry getEntries(int index) { + return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKeys()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKeys().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, keys_); + } + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(2, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, keys_); + } + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, entries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent 
parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.ORMap} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMapOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeysFieldBuilder(); + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keysBuilder_ == null) { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + entriesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + 
public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_ORMap_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keysBuilder_ == null) { + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.entries_ = entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap other) { + if (other == 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.getDefaultInstance()) return this; + if (other.hasKeys()) { + mergeKeys(other.getKeys()); + } + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + entriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKeys()) { + + return false; + } + if (!getKeys().isInitialized()) { + + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.ORSet keys = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet keys_ = 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder> keysBuilder_; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys() { + if (keysBuilder_ == null) { + return keys_; + } else { + return keysBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder setKeys(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keys_ = value; + onChanged(); + } else { + keysBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder setKeys( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder builderForValue) { + if (keysBuilder_ == null) { + keys_ = builderForValue.build(); + onChanged(); + } else { + keysBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder mergeKeys(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet value) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + keys_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance()) { + keys_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.newBuilder(keys_).mergeFrom(value).buildPartial(); + } else { + keys_ = value; + } + onChanged(); + } else 
{ + keysBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + onChanged(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder getKeysBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeysFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilder(); + } else { + return keys_; + } + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder> + getKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder>( + keys_, + getParentForChildren(), + isClean()); + keys_ = null; + } + return keysBuilder_; + } + + // repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + private java.util.List entries_ = + java.util.Collections.emptyList(); + private void ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000002; + } + } + + private 
com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public 
Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder 
clearEntries() { + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder getEntriesBuilder( + int index) { + return getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.ORMap.Entry entries = 2; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORMap.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.ORMap) + } + + static { + defaultInstance = new ORMap(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.ORMap) + } + + public interface LWWMapOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.ORSet keys = 1; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + boolean hasKeys(); + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys(); + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder(); + + // repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + java.util.List + getEntriesList(); + /** + * repeated 
.akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + java.util.List + getEntriesOrBuilderList(); + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.LWWMap} + */ + public static final class LWWMap extends + com.google.protobuf.GeneratedMessage + implements LWWMapOrBuilder { + // Use LWWMap.newBuilder() to construct. + private LWWMap(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private LWWMap(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final LWWMap defaultInstance; + public static LWWMap getDefaultInstance() { + return defaultInstance; + } + + public LWWMap getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LWWMap( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done 
= true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = keys_.toBuilder(); + } + keys_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(keys_); + keys_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public LWWMap 
parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LWWMap(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required .akka.cluster.ddata.LWWRegister value = 2; + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + boolean hasValue(); + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister getValue(); + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder getValueOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.LWWMap.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. 
+ private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = value_.toBuilder(); + } + value_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(value_); + value_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public 
com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .akka.cluster.ddata.LWWRegister value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister value_; + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister getValue() { + return value_; + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder getValueOrBuilder() { + return value_; + } + + private void initFields() { + key_ = ""; + value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + if (!getValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, value_); + } + getUnknownFields().writeTo(output); + } + 
+ private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.LWWMap.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (valueBuilder_ == null) { + value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry getDefaultInstanceForType() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (valueBuilder_ == null) { + result.value_ = value_; + } else { + result.value_ = valueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasValue()) { + + return false; + } + if 
(!getValue().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + 
return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required .akka.cluster.ddata.LWWRegister value = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder> valueBuilder_; + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister getValue() { + if (valueBuilder_ == null) { + return value_; + } else { + return valueBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public Builder setValue(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + onChanged(); + } else { + valueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public Builder setValue( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + onChanged(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * 
required .akka.cluster.ddata.LWWRegister value = 2; + */ + public Builder mergeValue(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister value) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + value_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance()) { + value_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.newBuilder(value_).mergeFrom(value).buildPartial(); + } else { + value_ = value; + } + onChanged(); + } else { + valueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.getDefaultInstance(); + onChanged(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder getValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getValueFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_; + } + } + /** + * required .akka.cluster.ddata.LWWRegister value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder> + getValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegister.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWRegisterOrBuilder>( + value_, + getParentForChildren(), + isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.LWWMap.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.LWWMap.Entry) + } + + private int bitField0_; + // required .akka.cluster.ddata.ORSet keys = 1; + public static final int KEYS_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet keys_; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys() { + return keys_; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder() { + return keys_; + } + + // repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + public static final int ENTRIES_FIELD_NUMBER = 2; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry getEntries(int index) { 
+ return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKeys()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKeys().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, keys_); + } + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(2, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, keys_); + } + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, entries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws 
java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.LWWMap} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMapOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Builder.class); + } + + // Construct using 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeysFieldBuilder(); + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keysBuilder_ == null) { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + entriesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_LWWMap_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap(this); + int from_bitField0_ = bitField0_; + int 
to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keysBuilder_ == null) { + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.entries_ = entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.getDefaultInstance()) return this; + if (other.hasKeys()) { + mergeKeys(other.getKeys()); + } + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + entriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKeys()) { + + return false; + } + if (!getKeys().isInitialized()) { + + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.ORSet keys = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder> keysBuilder_; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys() { + if (keysBuilder_ == null) { + return keys_; + } else { + return keysBuilder_.getMessage(); + } + } + /** + * required 
.akka.cluster.ddata.ORSet keys = 1; + */ + public Builder setKeys(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keys_ = value; + onChanged(); + } else { + keysBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder setKeys( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder builderForValue) { + if (keysBuilder_ == null) { + keys_ = builderForValue.build(); + onChanged(); + } else { + keysBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder mergeKeys(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet value) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + keys_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance()) { + keys_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.newBuilder(keys_).mergeFrom(value).buildPartial(); + } else { + keys_ = value; + } + onChanged(); + } else { + keysBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + onChanged(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder getKeysBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeysFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilder(); + } else { + return keys_; + } + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder> + getKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder>( + keys_, + getParentForChildren(), + isClean()); + keys_ = null; + } + return keysBuilder_; + } + + // repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + private java.util.List entries_ = + java.util.Collections.emptyList(); + private void ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return 
entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + 
entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder clearEntries() { + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder getEntriesBuilder( + int index) { + return 
getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.LWWMap.Entry entries = 2; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry, 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.LWWMap.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.LWWMap) + } + + static { + defaultInstance = new LWWMap(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.LWWMap) + } + + public interface PNCounterMapOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.ORSet keys = 1; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + boolean hasKeys(); + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys(); + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder(); + + // repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + java.util.List + getEntriesList(); + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + java.util.List + getEntriesOrBuilderList(); + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.PNCounterMap} + */ + public static final class PNCounterMap extends + com.google.protobuf.GeneratedMessage + 
implements PNCounterMapOrBuilder { + // Use PNCounterMap.newBuilder() to construct. + private PNCounterMap(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PNCounterMap(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PNCounterMap defaultInstance; + public static PNCounterMap getDefaultInstance() { + return defaultInstance; + } + + public PNCounterMap getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PNCounterMap( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = keys_.toBuilder(); + } + keys_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(keys_); + keys_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + 
entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PNCounterMap parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PNCounterMap(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); 
+ /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required .akka.cluster.ddata.PNCounter value = 2; + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + boolean hasValue(); + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter getValue(); + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder getValueOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.PNCounterMap.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + 
done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = value_.toBuilder(); + } + value_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(value_); + value_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .akka.cluster.ddata.PNCounter value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter value_; + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter getValue() { + return value_; + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder getValueOrBuilder() { + return value_; + } + + private void initFields() { + key_ = ""; + value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean 
isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + if (!getValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.PNCounterMap.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 
+ getValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (valueBuilder_ == null) { + value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (valueBuilder_ == null) { + result.value_ = value_; + } else { + result.value_ = valueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasValue()) { + + return false; + } + if (!getValue().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + 
java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required .akka.cluster.ddata.PNCounter value = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder> valueBuilder_; + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter 
getValue() { + if (valueBuilder_ == null) { + return value_; + } else { + return valueBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public Builder setValue(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + onChanged(); + } else { + valueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public Builder setValue( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + onChanged(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public Builder mergeValue(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter value) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + value_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance()) { + value_ = + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.newBuilder(value_).mergeFrom(value).buildPartial(); + } else { + value_ = value; + } + onChanged(); + } else { + valueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + value_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.getDefaultInstance(); + onChanged(); + } else { + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder getValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getValueFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_; + } + } + /** + * required .akka.cluster.ddata.PNCounter value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder> + getValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounter.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterOrBuilder>( + value_, + getParentForChildren(), + isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.PNCounterMap.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.PNCounterMap.Entry) + } + + private int bitField0_; + // required .akka.cluster.ddata.ORSet keys = 1; + public static final int KEYS_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet keys_; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys() { + 
return keys_; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder() { + return keys_; + } + + // repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + public static final int ENTRIES_FIELD_NUMBER = 2; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry getEntries(int index) { + return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKeys()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKeys().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, keys_); + } + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(2, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, keys_); + } + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, entries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( 
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.PNCounterMap} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMapOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.class, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeysFieldBuilder(); + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keysBuilder_ == null) { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + entriesBuilder_.clear(); + } 
+ return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.internal_static_akka_cluster_ddata_PNCounterMap_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap build() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap result = new akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keysBuilder_ == null) { + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.entries_ = entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap)other); + } else { + super.mergeFrom(other); + return this; + } + } + + 
public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.getDefaultInstance()) return this; + if (other.hasKeys()) { + mergeKeys(other.getKeys()); + } + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + entriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKeys()) { + + return false; + } + if (!getKeys().isInitialized()) { + + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.ORSet keys = 
1; + private akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder> keysBuilder_; + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet getKeys() { + if (keysBuilder_ == null) { + return keys_; + } else { + return keysBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder setKeys(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keys_ = value; + onChanged(); + } else { + keysBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder setKeys( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder builderForValue) { + if (keysBuilder_ == null) { + keys_ = builderForValue.build(); + onChanged(); + } else { + keysBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder mergeKeys(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet value) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + keys_ != akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance()) { + keys_ = + 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.newBuilder(keys_).mergeFrom(value).buildPartial(); + } else { + keys_ = value; + } + onChanged(); + } else { + keysBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.getDefaultInstance(); + onChanged(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder getKeysBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeysFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder getKeysOrBuilder() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilder(); + } else { + return keys_; + } + } + /** + * required .akka.cluster.ddata.ORSet keys = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder> + getKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSet.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.ORSetOrBuilder>( + keys_, + getParentForChildren(), + isClean()); + keys_ = null; + } + return keysBuilder_; + } + + // repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + private java.util.List entries_ = + java.util.Collections.emptyList(); + private void 
ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + 
ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder 
addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder clearEntries() { + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder getEntriesBuilder( + int index) { + return getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + 
akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.PNCounterMap.Entry entries = 2; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatedDataMessages.PNCounterMap.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.PNCounterMap) + } + + static { + defaultInstance = new PNCounterMap(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.PNCounterMap) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_GSet_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_GSet_fieldAccessorTable; + private static 
com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_ORSet_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_ORSet_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Flag_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Flag_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_LWWRegister_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_LWWRegister_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_GCounter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_GCounter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_GCounter_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_GCounter_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_PNCounter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_PNCounter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_VersionVector_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_ORMap_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_ORMap_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_ORMap_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_ORMap_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_LWWMap_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_LWWMap_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_LWWMap_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_LWWMap_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_PNCounterMap_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_PNCounterMap_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_PNCounterMap_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_PNCounterMap_Entry_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + 
"\n\034ReplicatedDataMessages.proto\022\022akka.clu" + + "ster.ddata\032\030ReplicatorMessages.proto\"\212\001\n" + + "\004GSet\022\026\n\016stringElements\030\001 \003(\t\022\027\n\013intElem" + + "ents\030\002 \003(\021B\002\020\001\022\030\n\014longElements\030\003 \003(\022B\002\020\001" + + "\0227\n\rotherElements\030\004 \003(\0132 .akka.cluster.d" + + "data.OtherMessage\"\360\001\n\005ORSet\0222\n\007vvector\030\001" + + " \002(\0132!.akka.cluster.ddata.VersionVector\022" + + "/\n\004dots\030\002 \003(\0132!.akka.cluster.ddata.Versi" + + "onVector\022\026\n\016stringElements\030\003 \003(\t\022\027\n\013intE" + + "lements\030\004 \003(\021B\002\020\001\022\030\n\014longElements\030\005 \003(\022B", + "\002\020\001\0227\n\rotherElements\030\006 \003(\0132 .akka.cluste" + + "r.ddata.OtherMessage\"\027\n\004Flag\022\017\n\007enabled\030" + + "\001 \002(\010\"\202\001\n\013LWWRegister\022\021\n\ttimestamp\030\001 \002(\022" + + "\022/\n\004node\030\002 \002(\0132!.akka.cluster.ddata.Uniq" + + "ueAddress\022/\n\005state\030\003 \002(\0132 .akka.cluster." 
+ + "ddata.OtherMessage\"\210\001\n\010GCounter\0223\n\007entri" + + "es\030\001 \003(\0132\".akka.cluster.ddata.GCounter.E" + + "ntry\032G\n\005Entry\022/\n\004node\030\001 \002(\0132!.akka.clust" + + "er.ddata.UniqueAddress\022\r\n\005value\030\002 \002(\014\"o\n" + + "\tPNCounter\0220\n\nincrements\030\001 \002(\0132\034.akka.cl", + "uster.ddata.GCounter\0220\n\ndecrements\030\002 \002(\013" + + "2\034.akka.cluster.ddata.GCounter\"\224\001\n\rVersi" + + "onVector\0228\n\007entries\030\001 \003(\0132\'.akka.cluster" + + ".ddata.VersionVector.Entry\032I\n\005Entry\022/\n\004n" + + "ode\030\001 \002(\0132!.akka.cluster.ddata.UniqueAdd" + + "ress\022\017\n\007version\030\002 \002(\003\"\251\001\n\005ORMap\022\'\n\004keys\030" + + "\001 \002(\0132\031.akka.cluster.ddata.ORSet\0220\n\007entr" + + "ies\030\002 \003(\0132\037.akka.cluster.ddata.ORMap.Ent" + + "ry\032E\n\005Entry\022\013\n\003key\030\001 \002(\t\022/\n\005value\030\002 \002(\0132" + + " .akka.cluster.ddata.OtherMessage\"\252\001\n\006LW", + "WMap\022\'\n\004keys\030\001 \002(\0132\031.akka.cluster.ddata." + + "ORSet\0221\n\007entries\030\002 \003(\0132 .akka.cluster.dd" + + "ata.LWWMap.Entry\032D\n\005Entry\022\013\n\003key\030\001 \002(\t\022." + + "\n\005value\030\002 \002(\0132\037.akka.cluster.ddata.LWWRe" + + "gister\"\264\001\n\014PNCounterMap\022\'\n\004keys\030\001 \002(\0132\031." 
+ + "akka.cluster.ddata.ORSet\0227\n\007entries\030\002 \003(" + + "\0132&.akka.cluster.ddata.PNCounterMap.Entr" + + "y\032B\n\005Entry\022\013\n\003key\030\001 \002(\t\022,\n\005value\030\002 \002(\0132\035" + + ".akka.cluster.ddata.PNCounterB#\n\037akka.cl" + + "uster.ddata.protobuf.msgH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_akka_cluster_ddata_GSet_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_akka_cluster_ddata_GSet_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_GSet_descriptor, + new java.lang.String[] { "StringElements", "IntElements", "LongElements", "OtherElements", }); + internal_static_akka_cluster_ddata_ORSet_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_akka_cluster_ddata_ORSet_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_ORSet_descriptor, + new java.lang.String[] { "Vvector", "Dots", "StringElements", "IntElements", "LongElements", "OtherElements", }); + internal_static_akka_cluster_ddata_Flag_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_akka_cluster_ddata_Flag_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Flag_descriptor, + new java.lang.String[] { "Enabled", }); + internal_static_akka_cluster_ddata_LWWRegister_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_akka_cluster_ddata_LWWRegister_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_akka_cluster_ddata_LWWRegister_descriptor, + new java.lang.String[] { "Timestamp", "Node", "State", }); + internal_static_akka_cluster_ddata_GCounter_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_akka_cluster_ddata_GCounter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_GCounter_descriptor, + new java.lang.String[] { "Entries", }); + internal_static_akka_cluster_ddata_GCounter_Entry_descriptor = + internal_static_akka_cluster_ddata_GCounter_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_GCounter_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_GCounter_Entry_descriptor, + new java.lang.String[] { "Node", "Value", }); + internal_static_akka_cluster_ddata_PNCounter_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_akka_cluster_ddata_PNCounter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_PNCounter_descriptor, + new java.lang.String[] { "Increments", "Decrements", }); + internal_static_akka_cluster_ddata_VersionVector_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_akka_cluster_ddata_VersionVector_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_VersionVector_descriptor, + new java.lang.String[] { "Entries", }); + internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor = + internal_static_akka_cluster_ddata_VersionVector_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_VersionVector_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_VersionVector_Entry_descriptor, + new java.lang.String[] { "Node", "Version", }); + 
internal_static_akka_cluster_ddata_ORMap_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_akka_cluster_ddata_ORMap_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_ORMap_descriptor, + new java.lang.String[] { "Keys", "Entries", }); + internal_static_akka_cluster_ddata_ORMap_Entry_descriptor = + internal_static_akka_cluster_ddata_ORMap_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_ORMap_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_ORMap_Entry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_akka_cluster_ddata_LWWMap_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_akka_cluster_ddata_LWWMap_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_LWWMap_descriptor, + new java.lang.String[] { "Keys", "Entries", }); + internal_static_akka_cluster_ddata_LWWMap_Entry_descriptor = + internal_static_akka_cluster_ddata_LWWMap_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_LWWMap_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_LWWMap_Entry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_akka_cluster_ddata_PNCounterMap_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_akka_cluster_ddata_PNCounterMap_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_PNCounterMap_descriptor, + new java.lang.String[] { "Keys", "Entries", }); + internal_static_akka_cluster_ddata_PNCounterMap_Entry_descriptor = + internal_static_akka_cluster_ddata_PNCounterMap_descriptor.getNestedTypes().get(0); + 
internal_static_akka_cluster_ddata_PNCounterMap_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_PNCounterMap_Entry_descriptor, + new java.lang.String[] { "Key", "Value", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatorMessages.java b/akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatorMessages.java new file mode 100644 index 0000000000..7df1e985dd --- /dev/null +++ b/akka-distributed-data/src/main/java/akka/cluster/ddata/protobuf/msg/ReplicatorMessages.java @@ -0,0 +1,14959 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: ReplicatorMessages.proto + +package akka.cluster.ddata.protobuf.msg; + +public final class ReplicatorMessages { + private ReplicatorMessages() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface GetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // required sint32 consistency = 2; + /** + * required sint32 consistency = 2; + */ + boolean hasConsistency(); + /** + * required sint32 consistency = 2; + */ + int getConsistency(); + + // required uint32 timeout = 3; + /** + * required uint32 timeout = 3; + */ + boolean hasTimeout(); + /** + * required uint32 timeout = 3; + */ + int getTimeout(); + + // optional .akka.cluster.ddata.OtherMessage request = 4; + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + boolean hasRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Get} + */ + public static final class Get extends + com.google.protobuf.GeneratedMessage + implements GetOrBuilder { + // Use Get.newBuilder() to construct. 
+ private Get(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Get(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Get defaultInstance; + public static Get getDefaultInstance() { + return defaultInstance; + } + + public Get getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Get( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + consistency_ = input.readSInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + timeout_ = input.readUInt32(); + break; + } + case 34: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ 
& 0x00000008) == 0x00000008)) { + subBuilder = request_.toBuilder(); + } + request_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(request_); + request_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Get_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Get_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Get parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Get(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required 
.akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + return key_; + } + + // required sint32 consistency = 2; + public static final int CONSISTENCY_FIELD_NUMBER = 2; + private int consistency_; + /** + * required sint32 consistency = 2; + */ + public boolean hasConsistency() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required sint32 consistency = 2; + */ + public int getConsistency() { + return consistency_; + } + + // required uint32 timeout = 3; + public static final int TIMEOUT_FIELD_NUMBER = 3; + private int timeout_; + /** + * required uint32 timeout = 3; + */ + public boolean hasTimeout() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 timeout = 3; + */ + public int getTimeout() { + return timeout_; + } + + // optional .akka.cluster.ddata.OtherMessage request = 4; + public static final int REQUEST_FIELD_NUMBER = 4; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + return request_; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + return request_; + } + + private void initFields() { + key_ = 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + consistency_ = 0; + timeout_ = 0; + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasConsistency()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeout()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSInt32(2, consistency_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, timeout_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, request_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt32Size(2, consistency_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, timeout_); + } + if (((bitField0_ & 
0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, request_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + 
return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Get} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Get_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Get_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + getRequestFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + consistency_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + timeout_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Get_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.consistency_ = consistency_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.timeout_ = timeout_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (requestBuilder_ == null) { + result.request_ = request_; + } else { + result.request_ = requestBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get.getDefaultInstance()) return this; + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasConsistency()) { + setConsistency(other.getConsistency()); + } + if (other.hasTimeout()) { + setTimeout(other.getTimeout()); + } + if (other.hasRequest()) { + mergeRequest(other.getRequest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if 
(!hasConsistency()) { + + return false; + } + if (!hasTimeout()) { + + return false; + } + if (!getKey().isInitialized()) { + + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Get) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + 
throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return 
key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // required sint32 consistency = 2; + private int consistency_ ; + /** + * required sint32 consistency = 2; + */ + public boolean hasConsistency() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required sint32 consistency = 2; + */ + public int getConsistency() { + return consistency_; + } + /** + * required sint32 consistency = 2; + */ + public Builder setConsistency(int value) { + bitField0_ |= 0x00000002; + consistency_ = value; + onChanged(); + return this; + } + /** + * required sint32 consistency = 2; + */ + public Builder clearConsistency() { + bitField0_ = (bitField0_ & ~0x00000002); + consistency_ = 0; + onChanged(); + return this; + } + + // required uint32 timeout = 3; + private int timeout_ ; + /** + * required uint32 timeout = 3; + */ + public boolean hasTimeout() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 timeout = 3; + */ + public int getTimeout() { + return timeout_; + } + /** + * required uint32 timeout = 3; + */ + public Builder setTimeout(int value) { + bitField0_ |= 0x00000004; + timeout_ = value; + onChanged(); + return this; + } + /** + * required uint32 timeout = 3; + */ + public Builder clearTimeout() { + bitField0_ = 
(bitField0_ & ~0x00000004); + timeout_ = 0; + onChanged(); + return this; + } + + // optional .akka.cluster.ddata.OtherMessage request = 4; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> requestBuilder_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + if (requestBuilder_ == null) { + return request_; + } else { + return requestBuilder_.getMessage(); + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder setRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + request_ = value; + onChanged(); + } else { + requestBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder setRequest( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (requestBuilder_ == null) { + request_ = builderForValue.build(); + onChanged(); + } else { + requestBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder mergeRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if 
(requestBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + request_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + request_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(request_).mergeFrom(value).buildPartial(); + } else { + request_ = value; + } + onChanged(); + } else { + requestBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder clearRequest() { + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getRequestBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getRequestFieldBuilder().getBuilder(); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + if (requestBuilder_ != null) { + return requestBuilder_.getMessageOrBuilder(); + } else { + return request_; + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getRequestFieldBuilder() { + if (requestBuilder_ == null) { + requestBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + request_, + getParentForChildren(), + isClean()); + request_ = null; + } + return requestBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Get) + } + + static { + defaultInstance = new Get(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Get) + } + + public interface GetSuccessOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // required .akka.cluster.ddata.OtherMessage data = 2; + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + boolean hasData(); + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData(); + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder(); + + // optional .akka.cluster.ddata.OtherMessage request = 4; + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + boolean hasRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.GetSuccess} + */ + public static final class GetSuccess extends + 
com.google.protobuf.GeneratedMessage + implements GetSuccessOrBuilder { + // Use GetSuccess.newBuilder() to construct. + private GetSuccess(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetSuccess(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetSuccess defaultInstance; + public static GetSuccess getDefaultInstance() { + return defaultInstance; + } + + public GetSuccess getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetSuccess( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 
0x00000002)) { + subBuilder = data_.toBuilder(); + } + data_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(data_); + data_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 34: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = request_.toBuilder(); + } + request_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(request_); + request_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetSuccess_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetSuccess_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetSuccess parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetSuccess(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + return key_; + } + + // required .akka.cluster.ddata.OtherMessage data = 2; + public static final int DATA_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage data_; + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData() { + return data_; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder() { + return data_; + } + + // optional .akka.cluster.ddata.OtherMessage request = 4; + public static final int REQUEST_FIELD_NUMBER = 4; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000004) 
== 0x00000004); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + return request_; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + return request_; + } + + private void initFields() { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasData()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getData().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, data_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(4, request_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 
0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, data_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, request_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.GetSuccess} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccessOrBuilder { + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetSuccess_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetSuccess_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + getDataFieldBuilder(); + getRequestFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (dataBuilder_ == null) { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + dataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetSuccess_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (dataBuilder_ == null) { + result.data_ = data_; + } else { + result.data_ = dataBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (requestBuilder_ == null) { + result.request_ = request_; + } else { + result.request_ = requestBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess.getDefaultInstance()) return this; + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasData()) { + mergeData(other.getData()); + } + if (other.hasRequest()) { + mergeRequest(other.getRequest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasData()) { + + return false; + } + if (!getKey().isInitialized()) { + + return false; + } + if (!getData().isInitialized()) { + + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetSuccess) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 
1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = 
(bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // required .akka.cluster.ddata.OtherMessage data = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> dataBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * 
required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData() { + if (dataBuilder_ == null) { + return data_; + } else { + return dataBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder setData(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (dataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + data_ = value; + onChanged(); + } else { + dataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder setData( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (dataBuilder_ == null) { + data_ = builderForValue.build(); + onChanged(); + } else { + dataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder mergeData(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (dataBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + data_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + data_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(data_).mergeFrom(value).buildPartial(); + } else { + data_ = value; + } + onChanged(); + } else { + dataBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder clearData() { + if (dataBuilder_ == null) { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + dataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required 
.akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getDataBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getDataFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder() { + if (dataBuilder_ != null) { + return dataBuilder_.getMessageOrBuilder(); + } else { + return data_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getDataFieldBuilder() { + if (dataBuilder_ == null) { + dataBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + data_, + getParentForChildren(), + isClean()); + data_ = null; + } + return dataBuilder_; + } + + // optional .akka.cluster.ddata.OtherMessage request = 4; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> requestBuilder_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional 
.akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + if (requestBuilder_ == null) { + return request_; + } else { + return requestBuilder_.getMessage(); + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder setRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + request_ = value; + onChanged(); + } else { + requestBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder setRequest( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (requestBuilder_ == null) { + request_ = builderForValue.build(); + onChanged(); + } else { + requestBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder mergeRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + request_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + request_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(request_).mergeFrom(value).buildPartial(); + } else { + request_ = value; + } + onChanged(); + } else { + requestBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public Builder clearRequest() { + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + requestBuilder_.clear(); + } + bitField0_ = 
(bitField0_ & ~0x00000004); + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getRequestBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getRequestFieldBuilder().getBuilder(); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + if (requestBuilder_ != null) { + return requestBuilder_.getMessageOrBuilder(); + } else { + return request_; + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getRequestFieldBuilder() { + if (requestBuilder_ == null) { + requestBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + request_, + getParentForChildren(), + isClean()); + request_ = null; + } + return requestBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.GetSuccess) + } + + static { + defaultInstance = new GetSuccess(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.GetSuccess) + } + + public interface NotFoundOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // optional .akka.cluster.ddata.OtherMessage request = 2; + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + boolean hasRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.NotFound} + */ + public static final class NotFound extends + com.google.protobuf.GeneratedMessage + implements NotFoundOrBuilder { + // Use NotFound.newBuilder() to construct. + private NotFound(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private NotFound(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final NotFound defaultInstance; + public static NotFound getDefaultInstance() { + return defaultInstance; + } + + public NotFound getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private NotFound( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + 
while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = request_.toBuilder(); + } + request_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(request_); + request_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_NotFound_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_NotFound_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NotFound parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NotFound(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + return key_; + } + + // optional .akka.cluster.ddata.OtherMessage request = 2; + public static final int REQUEST_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + return request_; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + return request_; + } + + private void initFields() { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, request_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, request_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.NotFound} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFoundOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_NotFound_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_NotFound_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.newBuilder() + private Builder() { + 
maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + getRequestFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_NotFound_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 
0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (requestBuilder_ == null) { + result.request_ = request_; + } else { + result.request_ = requestBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound.getDefaultInstance()) return this; + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasRequest()) { + mergeRequest(other.getRequest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!getKey().isInitialized()) { + + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.NotFound) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; 
+ } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // optional 
.akka.cluster.ddata.OtherMessage request = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> requestBuilder_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + if (requestBuilder_ == null) { + return request_; + } else { + return requestBuilder_.getMessage(); + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder setRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + request_ = value; + onChanged(); + } else { + requestBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder setRequest( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (requestBuilder_ == null) { + request_ = builderForValue.build(); + onChanged(); + } else { + requestBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder mergeRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + request_ != 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + request_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(request_).mergeFrom(value).buildPartial(); + } else { + request_ = value; + } + onChanged(); + } else { + requestBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder clearRequest() { + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getRequestBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRequestFieldBuilder().getBuilder(); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + if (requestBuilder_ != null) { + return requestBuilder_.getMessageOrBuilder(); + } else { + return request_; + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getRequestFieldBuilder() { + if (requestBuilder_ == null) { + requestBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + request_, + getParentForChildren(), 
+ isClean()); + request_ = null; + } + return requestBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.NotFound) + } + + static { + defaultInstance = new NotFound(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.NotFound) + } + + public interface GetFailureOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // optional .akka.cluster.ddata.OtherMessage request = 2; + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + boolean hasRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest(); + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.GetFailure} + */ + public static final class GetFailure extends + com.google.protobuf.GeneratedMessage + implements GetFailureOrBuilder { + // Use GetFailure.newBuilder() to construct. 
+ private GetFailure(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetFailure(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetFailure defaultInstance; + public static GetFailure getDefaultInstance() { + return defaultInstance; + } + + public GetFailure getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetFailure( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = request_.toBuilder(); + } + request_ = 
input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(request_); + request_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetFailure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetFailure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetFailure parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetFailure(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public 
boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + return key_; + } + + // optional .akka.cluster.ddata.OtherMessage request = 2; + public static final int REQUEST_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + return request_; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + return request_; + } + + private void initFields() { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + 
getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, request_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, request_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code 
akka.cluster.ddata.GetFailure} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailureOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetFailure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetFailure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + getRequestFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_GetFailure_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (requestBuilder_ == null) { + result.request_ = request_; + } else { + result.request_ = requestBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure.getDefaultInstance()) return this; + if (other.hasKey()) { + 
mergeKey(other.getKey()); + } + if (other.hasRequest()) { + mergeRequest(other.getRequest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!getKey().isInitialized()) { + + return false; + } + if (hasRequest()) { + if (!getRequest().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GetFailure) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required 
.akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + 
*/ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // optional .akka.cluster.ddata.OtherMessage request = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> requestBuilder_; + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getRequest() { + if (requestBuilder_ == null) { + return request_; + } else { + return requestBuilder_.getMessage(); + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder 
setRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + request_ = value; + onChanged(); + } else { + requestBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder setRequest( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (requestBuilder_ == null) { + request_ = builderForValue.build(); + onChanged(); + } else { + requestBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder mergeRequest(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (requestBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + request_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + request_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(request_).mergeFrom(value).buildPartial(); + } else { + request_ = value; + } + onChanged(); + } else { + requestBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public Builder clearRequest() { + if (requestBuilder_ == null) { + request_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + requestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getRequestBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRequestFieldBuilder().getBuilder(); + } + /** + * optional 
.akka.cluster.ddata.OtherMessage request = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getRequestOrBuilder() { + if (requestBuilder_ != null) { + return requestBuilder_.getMessageOrBuilder(); + } else { + return request_; + } + } + /** + * optional .akka.cluster.ddata.OtherMessage request = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getRequestFieldBuilder() { + if (requestBuilder_ == null) { + requestBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + request_, + getParentForChildren(), + isClean()); + request_ = null; + } + return requestBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.GetFailure) + } + + static { + defaultInstance = new GetFailure(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.GetFailure) + } + + public interface SubscribeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // required string ref = 2; + /** + * required string ref = 2; + */ + boolean hasRef(); + /** + * required string ref = 2; + */ + java.lang.String 
getRef(); + /** + * required string ref = 2; + */ + com.google.protobuf.ByteString + getRefBytes(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Subscribe} + */ + public static final class Subscribe extends + com.google.protobuf.GeneratedMessage + implements SubscribeOrBuilder { + // Use Subscribe.newBuilder() to construct. + private Subscribe(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Subscribe(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Subscribe defaultInstance; + public static Subscribe getDefaultInstance() { + return defaultInstance; + } + + public Subscribe getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Subscribe( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = 
subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + ref_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Subscribe_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Subscribe_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Subscribe parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Subscribe(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required 
.akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + return key_; + } + + // required string ref = 2; + public static final int REF_FIELD_NUMBER = 2; + private java.lang.Object ref_; + /** + * required string ref = 2; + */ + public boolean hasRef() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string ref = 2; + */ + public java.lang.String getRef() { + java.lang.Object ref = ref_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ref_ = s; + } + return s; + } + } + /** + * required string ref = 2; + */ + public com.google.protobuf.ByteString + getRefBytes() { + java.lang.Object ref = ref_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ref_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + ref_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRef()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws 
java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getRefBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getRefBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * 
Protobuf type {@code akka.cluster.ddata.Subscribe} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.SubscribeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Subscribe_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Subscribe_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + ref_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Subscribe_descriptor; + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ref_ = ref_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe.getDefaultInstance()) return this; + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasRef()) { + bitField0_ |= 0x00000002; + ref_ = other.ref_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasRef()) { + + return false; + } + if 
(!getKey().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Subscribe) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + 
/** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // required string ref = 2; + private java.lang.Object ref_ = ""; + /** + * required string ref = 2; + */ + public boolean hasRef() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string ref = 2; + */ + public java.lang.String getRef() { + java.lang.Object ref = ref_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ref_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string ref = 2; + */ + public com.google.protobuf.ByteString + getRefBytes() { + java.lang.Object ref = ref_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ref_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string ref = 2; + */ + public Builder setRef( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ref_ = value; + onChanged(); + return this; + } + /** + * required string ref = 2; + */ + public Builder clearRef() { + bitField0_ = (bitField0_ & ~0x00000002); + ref_ = getDefaultInstance().getRef(); + onChanged(); + return this; + } + /** + * required string ref = 2; + */ + public Builder setRefBytes( + 
com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ref_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Subscribe) + } + + static { + defaultInstance = new Subscribe(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Subscribe) + } + + public interface UnsubscribeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // required string ref = 2; + /** + * required string ref = 2; + */ + boolean hasRef(); + /** + * required string ref = 2; + */ + java.lang.String getRef(); + /** + * required string ref = 2; + */ + com.google.protobuf.ByteString + getRefBytes(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Unsubscribe} + */ + public static final class Unsubscribe extends + com.google.protobuf.GeneratedMessage + implements UnsubscribeOrBuilder { + // Use Unsubscribe.newBuilder() to construct. 
+ private Unsubscribe(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Unsubscribe(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Unsubscribe defaultInstance; + public static Unsubscribe getDefaultInstance() { + return defaultInstance; + } + + public Unsubscribe getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Unsubscribe( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + ref_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Unsubscribe_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Unsubscribe_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Unsubscribe parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Unsubscribe(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + 
return key_; + } + + // required string ref = 2; + public static final int REF_FIELD_NUMBER = 2; + private java.lang.Object ref_; + /** + * required string ref = 2; + */ + public boolean hasRef() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string ref = 2; + */ + public java.lang.String getRef() { + java.lang.Object ref = ref_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ref_ = s; + } + return s; + } + } + /** + * required string ref = 2; + */ + public com.google.protobuf.ByteString + getRefBytes() { + java.lang.Object ref = ref_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ref_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + ref_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRef()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getRefBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public 
int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getRefBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Unsubscribe} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UnsubscribeOrBuilder { + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Unsubscribe_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Unsubscribe_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + ref_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Unsubscribe_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.getDefaultInstance(); + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ref_ = ref_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe.getDefaultInstance()) return this; + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasRef()) { + bitField0_ |= 0x00000002; + ref_ = other.ref_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasRef()) { + + return false; + } + if (!getKey().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Unsubscribe) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // required string ref = 2; + private java.lang.Object ref_ = ""; + /** + * required string ref = 2; + */ + public boolean hasRef() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string ref = 2; + */ + public java.lang.String getRef() { + java.lang.Object ref = ref_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ref_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string ref = 2; + */ + public com.google.protobuf.ByteString + getRefBytes() { + java.lang.Object ref = ref_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ref_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string ref = 2; + */ + public Builder setRef( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ref_ = value; + onChanged(); + return this; + } + /** + * required string ref = 2; + */ + public Builder clearRef() { + bitField0_ = (bitField0_ & ~0x00000002); + ref_ = getDefaultInstance().getRef(); + onChanged(); + return this; + } + /** + * required string ref = 2; + */ + public Builder setRefBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw 
new NullPointerException(); + } + bitField0_ |= 0x00000002; + ref_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Unsubscribe) + } + + static { + defaultInstance = new Unsubscribe(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Unsubscribe) + } + + public interface ChangedOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage key = 1; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + boolean hasKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey(); + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder(); + + // required .akka.cluster.ddata.OtherMessage data = 2; + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + boolean hasData(); + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData(); + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Changed} + */ + public static final class Changed extends + com.google.protobuf.GeneratedMessage + implements ChangedOrBuilder { + // Use Changed.newBuilder() to construct. 
+ private Changed(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Changed(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Changed defaultInstance; + public static Changed getDefaultInstance() { + return defaultInstance; + } + + public Changed getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Changed( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = key_.toBuilder(); + } + key_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(key_); + key_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = data_.toBuilder(); + } + data_ = 
input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(data_); + data_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Changed_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Changed_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Changed parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Changed(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + 
return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + return key_; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + return key_; + } + + // required .akka.cluster.ddata.OtherMessage data = 2; + public static final int DATA_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage data_; + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData() { + return data_; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder() { + return data_; + } + + private void initFields() { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasData()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getData().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + 
getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, data_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, key_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, data_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Changed} + */ + 
public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ChangedOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Changed_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Changed_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeyFieldBuilder(); + getDataFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (dataBuilder_ == null) { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + dataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Changed_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keyBuilder_ == null) { + result.key_ = key_; + } else { + result.key_ = keyBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (dataBuilder_ == null) { + result.data_ = data_; + } else { + result.data_ = dataBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed.getDefaultInstance()) return this; + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasData()) { + mergeData(other.getData()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasData()) { + + return false; + } + if (!getKey().isInitialized()) { + + return false; + } + if (!getData().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Changed) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage key = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> keyBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getKey() { + if (keyBuilder_ == null) { + return key_; + } else { + return keyBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder 
setKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + onChanged(); + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder setKey( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + onChanged(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder mergeKey(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + key_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + key_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(key_).mergeFrom(value).buildPartial(); + } else { + key_ = value; + } + onChanged(); + } else { + keyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + keyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getKeyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeyFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage key = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + key_, + getParentForChildren(), + isClean()); + key_ = null; + } + return keyBuilder_; + } + + // required .akka.cluster.ddata.OtherMessage data = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> dataBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData() { + if (dataBuilder_ == null) { + return data_; + } else { + return dataBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder 
setData(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (dataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + data_ = value; + onChanged(); + } else { + dataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder setData( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (dataBuilder_ == null) { + data_ = builderForValue.build(); + onChanged(); + } else { + dataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder mergeData(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (dataBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + data_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + data_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(data_).mergeFrom(value).buildPartial(); + } else { + data_ = value; + } + onChanged(); + } else { + dataBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public Builder clearData() { + if (dataBuilder_ == null) { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + dataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getDataBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getDataFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder() { + if (dataBuilder_ != null) { + return dataBuilder_.getMessageOrBuilder(); + } else { + return data_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage data = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getDataFieldBuilder() { + if (dataBuilder_ == null) { + dataBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + data_, + getParentForChildren(), + isClean()); + data_ = null; + } + return dataBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Changed) + } + + static { + defaultInstance = new Changed(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Changed) + } + + public interface WriteOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required .akka.cluster.ddata.DataEnvelope envelope = 2; + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + boolean hasEnvelope(); + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope(); + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder 
getEnvelopeOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Write} + */ + public static final class Write extends + com.google.protobuf.GeneratedMessage + implements WriteOrBuilder { + // Use Write.newBuilder() to construct. + private Write(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Write(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Write defaultInstance; + public static Write getDefaultInstance() { + return defaultInstance; + } + + public Write getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Write( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = envelope_.toBuilder(); + } + envelope_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(envelope_); + envelope_ = 
subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Write_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Write_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Write parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Write(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .akka.cluster.ddata.DataEnvelope envelope = 2; + public static final int ENVELOPE_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope envelope_; + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope() { + return envelope_; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder() { + return envelope_; + } + + private void initFields() { + key_ = ""; + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasEnvelope()) { + memoizedIsInitialized = 0; + return false; + } + if (!getEnvelope().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + 
if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, envelope_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, envelope_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Write} + */ + public static final 
class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.WriteOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Write_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Write_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEnvelopeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (envelopeBuilder_ == null) { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Write_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write getDefaultInstanceForType() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (envelopeBuilder_ == null) { + result.envelope_ = envelope_; + } else { + result.envelope_ = envelopeBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasEnvelope()) { + mergeEnvelope(other.getEnvelope()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasEnvelope()) { + + return false; + } + if (!getEnvelope().isInitialized()) { + + return false; + } + return true; + } + + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Write) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + 
com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required .akka.cluster.ddata.DataEnvelope envelope = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder> envelopeBuilder_; + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope() { + if (envelopeBuilder_ == null) { + return envelope_; + } else { + return envelopeBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder setEnvelope(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope value) { + if (envelopeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + envelope_ = value; + onChanged(); + } else { + envelopeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder setEnvelope( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder builderForValue) { + if (envelopeBuilder_ == null) { + envelope_ = builderForValue.build(); + onChanged(); + } else { + envelopeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 
2; + */ + public Builder mergeEnvelope(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope value) { + if (envelopeBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + envelope_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance()) { + envelope_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.newBuilder(envelope_).mergeFrom(value).buildPartial(); + } else { + envelope_ = value; + } + onChanged(); + } else { + envelopeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder clearEnvelope() { + if (envelopeBuilder_ == null) { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + onChanged(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder getEnvelopeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getEnvelopeFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder() { + if (envelopeBuilder_ != null) { + return envelopeBuilder_.getMessageOrBuilder(); + } else { + return envelope_; + } + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder> + getEnvelopeFieldBuilder() { + if (envelopeBuilder_ == null) { + envelopeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder>( + envelope_, + getParentForChildren(), + isClean()); + envelope_ = null; + } + return envelopeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Write) + } + + static { + defaultInstance = new Write(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Write) + } + + public interface EmptyOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code akka.cluster.ddata.Empty} + */ + public static final class Empty extends + com.google.protobuf.GeneratedMessage + implements EmptyOrBuilder { + // Use Empty.newBuilder() to construct. + private Empty(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Empty(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Empty defaultInstance; + public static Empty getDefaultInstance() { + return defaultInstance; + } + + public Empty getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Empty( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if 
(!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Empty_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Empty_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Empty parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Empty(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int 
memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Empty} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.EmptyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Empty_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Empty_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Empty_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty) { + return 
mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Empty) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Empty) + } + + static { + defaultInstance = new Empty(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Empty) + } + + public interface ReadOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Read} + */ + public static final class Read extends + com.google.protobuf.GeneratedMessage + implements ReadOrBuilder { + // Use Read.newBuilder() to construct. 
+ private Read(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Read(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Read defaultInstance; + public static Read getDefaultInstance() { + return defaultInstance; + } + + public Read getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Read( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Read_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + 
return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Read_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Read parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Read(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + 
if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Read} + */ + public static final class 
Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Read_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Read_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Read_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Read) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private 
java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Read) + } + + static { + defaultInstance = new Read(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Read) + } + + public interface ReadResultOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .akka.cluster.ddata.DataEnvelope envelope = 1; + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + boolean hasEnvelope(); + /** + * optional 
.akka.cluster.ddata.DataEnvelope envelope = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope(); + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.ReadResult} + */ + public static final class ReadResult extends + com.google.protobuf.GeneratedMessage + implements ReadResultOrBuilder { + // Use ReadResult.newBuilder() to construct. + private ReadResult(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReadResult(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReadResult defaultInstance; + public static ReadResult getDefaultInstance() { + return defaultInstance; + } + + public ReadResult getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReadResult( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder 
= envelope_.toBuilder(); + } + envelope_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(envelope_); + envelope_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_ReadResult_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_ReadResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReadResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadResult(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .akka.cluster.ddata.DataEnvelope envelope = 1; + public static final int ENVELOPE_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope envelope_; + /** + * 
optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope() { + return envelope_; + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder() { + return envelope_; + } + + private void initFields() { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasEnvelope()) { + if (!getEnvelope().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, envelope_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, envelope_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom( + com.google.protobuf.CodedInputStream 
input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.ReadResult} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_ReadResult_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_ReadResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEnvelopeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (envelopeBuilder_ == null) { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_ReadResult_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (envelopeBuilder_ == null) { + result.envelope_ = envelope_; + } else { + result.envelope_ = envelopeBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult.getDefaultInstance()) return this; + if (other.hasEnvelope()) { + mergeEnvelope(other.getEnvelope()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasEnvelope()) { + if (!getEnvelope().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.ReadResult) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .akka.cluster.ddata.DataEnvelope envelope = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder> envelopeBuilder_; + /** + * optional 
.akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope() { + if (envelopeBuilder_ == null) { + return envelope_; + } else { + return envelopeBuilder_.getMessage(); + } + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public Builder setEnvelope(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope value) { + if (envelopeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + envelope_ = value; + onChanged(); + } else { + envelopeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public Builder setEnvelope( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder builderForValue) { + if (envelopeBuilder_ == null) { + envelope_ = builderForValue.build(); + onChanged(); + } else { + envelopeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public Builder mergeEnvelope(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope value) { + if (envelopeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + envelope_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance()) { + envelope_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.newBuilder(envelope_).mergeFrom(value).buildPartial(); + } else { + envelope_ = value; + } + onChanged(); + } else { + envelopeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public Builder clearEnvelope() { + if (envelopeBuilder_ == 
null) { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + onChanged(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder getEnvelopeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getEnvelopeFieldBuilder().getBuilder(); + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder() { + if (envelopeBuilder_ != null) { + return envelopeBuilder_.getMessageOrBuilder(); + } else { + return envelope_; + } + } + /** + * optional .akka.cluster.ddata.DataEnvelope envelope = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder> + getEnvelopeFieldBuilder() { + if (envelopeBuilder_ == null) { + envelopeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder>( + envelope_, + getParentForChildren(), + isClean()); + envelope_ = null; + } + return envelopeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.ReadResult) + } + + static { + defaultInstance = new ReadResult(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.ReadResult) + } + + public interface DataEnvelopeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.OtherMessage data = 1; + /** + * 
required .akka.cluster.ddata.OtherMessage data = 1; + */ + boolean hasData(); + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData(); + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder(); + + // repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + java.util.List + getPruningList(); + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry getPruning(int index); + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + int getPruningCount(); + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + java.util.List + getPruningOrBuilderList(); + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder getPruningOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.DataEnvelope} + */ + public static final class DataEnvelope extends + com.google.protobuf.GeneratedMessage + implements DataEnvelopeOrBuilder { + // Use DataEnvelope.newBuilder() to construct. 
+ private DataEnvelope(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DataEnvelope(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DataEnvelope defaultInstance; + public static DataEnvelope getDefaultInstance() { + return defaultInstance; + } + + public DataEnvelope getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DataEnvelope( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = data_.toBuilder(); + } + data_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(data_); + data_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + pruning_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + 
pruning_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + pruning_ = java.util.Collections.unmodifiableList(pruning_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DataEnvelope parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DataEnvelope(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface PruningEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + boolean 
hasRemovedAddress(); + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getRemovedAddress(); + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getRemovedAddressOrBuilder(); + + // required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + boolean hasOwnerAddress(); + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getOwnerAddress(); + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getOwnerAddressOrBuilder(); + + // required bool performed = 3; + /** + * required bool performed = 3; + */ + boolean hasPerformed(); + /** + * required bool performed = 3; + */ + boolean getPerformed(); + + // repeated .akka.cluster.ddata.Address seen = 4; + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + java.util.List + getSeenList(); + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getSeen(int index); + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + int getSeenCount(); + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + java.util.List + getSeenOrBuilderList(); + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder getSeenOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.DataEnvelope.PruningEntry} + */ + public static final class PruningEntry extends + com.google.protobuf.GeneratedMessage + implements PruningEntryOrBuilder { + // Use PruningEntry.newBuilder() to construct. 
+ private PruningEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PruningEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PruningEntry defaultInstance; + public static PruningEntry getDefaultInstance() { + return defaultInstance; + } + + public PruningEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PruningEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = removedAddress_.toBuilder(); + } + removedAddress_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(removedAddress_); + removedAddress_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = ownerAddress_.toBuilder(); + 
} + ownerAddress_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(ownerAddress_); + ownerAddress_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + performed_ = input.readBool(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + seen_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + seen_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + seen_ = java.util.Collections.unmodifiableList(seen_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PruningEntry parsePartialFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PruningEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + public static final int REMOVEDADDRESS_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress removedAddress_; + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public boolean hasRemovedAddress() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getRemovedAddress() { + return removedAddress_; + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getRemovedAddressOrBuilder() { + return removedAddress_; + } + + // required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + public static final int OWNERADDRESS_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress ownerAddress_; + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public boolean hasOwnerAddress() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getOwnerAddress() { + return ownerAddress_; + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getOwnerAddressOrBuilder() { + return ownerAddress_; + } + + // required bool performed = 3; + public static final 
int PERFORMED_FIELD_NUMBER = 3; + private boolean performed_; + /** + * required bool performed = 3; + */ + public boolean hasPerformed() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool performed = 3; + */ + public boolean getPerformed() { + return performed_; + } + + // repeated .akka.cluster.ddata.Address seen = 4; + public static final int SEEN_FIELD_NUMBER = 4; + private java.util.List seen_; + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public java.util.List getSeenList() { + return seen_; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public java.util.List + getSeenOrBuilderList() { + return seen_; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public int getSeenCount() { + return seen_.size(); + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getSeen(int index) { + return seen_.get(index); + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder getSeenOrBuilder( + int index) { + return seen_.get(index); + } + + private void initFields() { + removedAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + ownerAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + performed_ = false; + seen_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRemovedAddress()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasOwnerAddress()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPerformed()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRemovedAddress().isInitialized()) { + memoizedIsInitialized = 0; 
+ return false; + } + if (!getOwnerAddress().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getSeenCount(); i++) { + if (!getSeen(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, removedAddress_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, ownerAddress_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, performed_); + } + for (int i = 0; i < seen_.size(); i++) { + output.writeMessage(4, seen_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, removedAddress_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, ownerAddress_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, performed_); + } + for (int i = 0; i < seen_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, seen_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom( + com.google.protobuf.ByteString data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.DataEnvelope.PruningEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.class, 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRemovedAddressFieldBuilder(); + getOwnerAddressFieldBuilder(); + getSeenFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (removedAddressBuilder_ == null) { + removedAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + } else { + removedAddressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (ownerAddressBuilder_ == null) { + ownerAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + } else { + ownerAddressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + performed_ = false; + bitField0_ = (bitField0_ & ~0x00000004); + if (seenBuilder_ == null) { + seen_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + seenBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.getDefaultInstance(); + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (removedAddressBuilder_ == null) { + result.removedAddress_ = removedAddress_; + } else { + result.removedAddress_ = removedAddressBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (ownerAddressBuilder_ == null) { + result.ownerAddress_ = ownerAddress_; + } else { + result.ownerAddress_ = ownerAddressBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.performed_ = performed_; + if (seenBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + seen_ = java.util.Collections.unmodifiableList(seen_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.seen_ = seen_; + } else { + result.seen_ = seenBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.getDefaultInstance()) return this; + if (other.hasRemovedAddress()) { + mergeRemovedAddress(other.getRemovedAddress()); + } + if (other.hasOwnerAddress()) { + mergeOwnerAddress(other.getOwnerAddress()); + } + if (other.hasPerformed()) { + setPerformed(other.getPerformed()); + } + if (seenBuilder_ == null) { + if (!other.seen_.isEmpty()) { + if (seen_.isEmpty()) { + seen_ = other.seen_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureSeenIsMutable(); + seen_.addAll(other.seen_); + } + onChanged(); + } + } else { + if (!other.seen_.isEmpty()) { + if (seenBuilder_.isEmpty()) { + seenBuilder_.dispose(); + seenBuilder_ = null; + seen_ = other.seen_; + bitField0_ = (bitField0_ & ~0x00000008); + seenBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getSeenFieldBuilder() : null; + } else { + seenBuilder_.addAllMessages(other.seen_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRemovedAddress()) { + + return false; + } + if (!hasOwnerAddress()) { + + return false; + } + if (!hasPerformed()) { + + return false; + } + if (!getRemovedAddress().isInitialized()) { + + return false; + } + if (!getOwnerAddress().isInitialized()) { + + return false; + } + for (int i = 0; i < getSeenCount(); i++) { + if (!getSeen(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress removedAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> removedAddressBuilder_; + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public boolean hasRemovedAddress() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getRemovedAddress() { + if (removedAddressBuilder_ == null) { + return removedAddress_; + } else { + return removedAddressBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public Builder setRemovedAddress(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (removedAddressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + removedAddress_ = value; + onChanged(); + } else { + removedAddressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public Builder setRemovedAddress( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) { + if 
(removedAddressBuilder_ == null) { + removedAddress_ = builderForValue.build(); + onChanged(); + } else { + removedAddressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public Builder mergeRemovedAddress(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (removedAddressBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + removedAddress_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) { + removedAddress_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(removedAddress_).mergeFrom(value).buildPartial(); + } else { + removedAddress_ = value; + } + onChanged(); + } else { + removedAddressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public Builder clearRemovedAddress() { + if (removedAddressBuilder_ == null) { + removedAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + onChanged(); + } else { + removedAddressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getRemovedAddressBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRemovedAddressFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.UniqueAddress removedAddress = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getRemovedAddressOrBuilder() { + if (removedAddressBuilder_ != null) { + return removedAddressBuilder_.getMessageOrBuilder(); + } else { + return removedAddress_; + } + } + /** + * required .akka.cluster.ddata.UniqueAddress 
removedAddress = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> + getRemovedAddressFieldBuilder() { + if (removedAddressBuilder_ == null) { + removedAddressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>( + removedAddress_, + getParentForChildren(), + isClean()); + removedAddress_ = null; + } + return removedAddressBuilder_; + } + + // required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress ownerAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> ownerAddressBuilder_; + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public boolean hasOwnerAddress() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getOwnerAddress() { + if (ownerAddressBuilder_ == null) { + return ownerAddress_; + } else { + return ownerAddressBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public Builder setOwnerAddress(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (ownerAddressBuilder_ == 
null) { + if (value == null) { + throw new NullPointerException(); + } + ownerAddress_ = value; + onChanged(); + } else { + ownerAddressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public Builder setOwnerAddress( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder builderForValue) { + if (ownerAddressBuilder_ == null) { + ownerAddress_ = builderForValue.build(); + onChanged(); + } else { + ownerAddressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public Builder mergeOwnerAddress(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress value) { + if (ownerAddressBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + ownerAddress_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) { + ownerAddress_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder(ownerAddress_).mergeFrom(value).buildPartial(); + } else { + ownerAddress_ = value; + } + onChanged(); + } else { + ownerAddressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public Builder clearOwnerAddress() { + if (ownerAddressBuilder_ == null) { + ownerAddress_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + onChanged(); + } else { + ownerAddressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder getOwnerAddressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getOwnerAddressFieldBuilder().getBuilder(); + } + /** + * 
required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder getOwnerAddressOrBuilder() { + if (ownerAddressBuilder_ != null) { + return ownerAddressBuilder_.getMessageOrBuilder(); + } else { + return ownerAddress_; + } + } + /** + * required .akka.cluster.ddata.UniqueAddress ownerAddress = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder> + getOwnerAddressFieldBuilder() { + if (ownerAddressBuilder_ == null) { + ownerAddressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder>( + ownerAddress_, + getParentForChildren(), + isClean()); + ownerAddress_ = null; + } + return ownerAddressBuilder_; + } + + // required bool performed = 3; + private boolean performed_ ; + /** + * required bool performed = 3; + */ + public boolean hasPerformed() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool performed = 3; + */ + public boolean getPerformed() { + return performed_; + } + /** + * required bool performed = 3; + */ + public Builder setPerformed(boolean value) { + bitField0_ |= 0x00000004; + performed_ = value; + onChanged(); + return this; + } + /** + * required bool performed = 3; + */ + public Builder clearPerformed() { + bitField0_ = (bitField0_ & ~0x00000004); + performed_ = false; + onChanged(); + return this; + } + + // repeated .akka.cluster.ddata.Address seen = 4; + private java.util.List seen_ = + java.util.Collections.emptyList(); + private void ensureSeenIsMutable() { + if (!((bitField0_ & 0x00000008) == 
0x00000008)) { + seen_ = new java.util.ArrayList(seen_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder> seenBuilder_; + + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public java.util.List getSeenList() { + if (seenBuilder_ == null) { + return java.util.Collections.unmodifiableList(seen_); + } else { + return seenBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public int getSeenCount() { + if (seenBuilder_ == null) { + return seen_.size(); + } else { + return seenBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getSeen(int index) { + if (seenBuilder_ == null) { + return seen_.get(index); + } else { + return seenBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder setSeen( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address value) { + if (seenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSeenIsMutable(); + seen_.set(index, value); + onChanged(); + } else { + seenBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder setSeen( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder builderForValue) { + if (seenBuilder_ == null) { + ensureSeenIsMutable(); + seen_.set(index, builderForValue.build()); + onChanged(); + } else { + seenBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder 
addSeen(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address value) { + if (seenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSeenIsMutable(); + seen_.add(value); + onChanged(); + } else { + seenBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder addSeen( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address value) { + if (seenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSeenIsMutable(); + seen_.add(index, value); + onChanged(); + } else { + seenBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder addSeen( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder builderForValue) { + if (seenBuilder_ == null) { + ensureSeenIsMutable(); + seen_.add(builderForValue.build()); + onChanged(); + } else { + seenBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder addSeen( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder builderForValue) { + if (seenBuilder_ == null) { + ensureSeenIsMutable(); + seen_.add(index, builderForValue.build()); + onChanged(); + } else { + seenBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder addAllSeen( + java.lang.Iterable values) { + if (seenBuilder_ == null) { + ensureSeenIsMutable(); + super.addAll(values, seen_); + onChanged(); + } else { + seenBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder clearSeen() { + if (seenBuilder_ == null) { + seen_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { 
+ seenBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public Builder removeSeen(int index) { + if (seenBuilder_ == null) { + ensureSeenIsMutable(); + seen_.remove(index); + onChanged(); + } else { + seenBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder getSeenBuilder( + int index) { + return getSeenFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder getSeenOrBuilder( + int index) { + if (seenBuilder_ == null) { + return seen_.get(index); } else { + return seenBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public java.util.List + getSeenOrBuilderList() { + if (seenBuilder_ != null) { + return seenBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(seen_); + } + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder addSeenBuilder() { + return getSeenFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder addSeenBuilder( + int index) { + return getSeenFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.Address seen = 4; + */ + public java.util.List + getSeenBuilderList() { + return getSeenFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address, 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder> + getSeenFieldBuilder() { + if (seenBuilder_ == null) { + seenBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder>( + seen_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + seen_ = null; + } + return seenBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.DataEnvelope.PruningEntry) + } + + static { + defaultInstance = new PruningEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.DataEnvelope.PruningEntry) + } + + private int bitField0_; + // required .akka.cluster.ddata.OtherMessage data = 1; + public static final int DATA_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage data_; + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData() { + return data_; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder() { + return data_; + } + + // repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + public static final int PRUNING_FIELD_NUMBER = 2; + private java.util.List pruning_; + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public java.util.List getPruningList() { + return pruning_; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + 
*/ + public java.util.List + getPruningOrBuilderList() { + return pruning_; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public int getPruningCount() { + return pruning_.size(); + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry getPruning(int index) { + return pruning_.get(index); + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder getPruningOrBuilder( + int index) { + return pruning_.get(index); + } + + private void initFields() { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + pruning_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasData()) { + memoizedIsInitialized = 0; + return false; + } + if (!getData().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getPruningCount(); i++) { + if (!getPruning(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, data_); + } + for (int i = 0; i < pruning_.size(); i++) { + output.writeMessage(2, pruning_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += 
com.google.protobuf.CodedOutputStream + .computeMessageSize(1, data_); + } + for (int i = 0; i < pruning_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, pruning_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + 
} + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.DataEnvelope} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getDataFieldBuilder(); + getPruningFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (dataBuilder_ == null) { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } else { + dataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (pruningBuilder_ == null) { + pruning_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + pruningBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_DataEnvelope_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope build() { + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (dataBuilder_ == null) { + result.data_ = data_; + } else { + result.data_ = dataBuilder_.build(); + } + if (pruningBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + pruning_ = java.util.Collections.unmodifiableList(pruning_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.pruning_ = pruning_; + } else { + result.pruning_ = pruningBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance()) return this; + if (other.hasData()) { + mergeData(other.getData()); + } + if (pruningBuilder_ == null) { + if (!other.pruning_.isEmpty()) { + if (pruning_.isEmpty()) { + pruning_ = other.pruning_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePruningIsMutable(); + pruning_.addAll(other.pruning_); + } + onChanged(); + } + } else { + if (!other.pruning_.isEmpty()) { + if (pruningBuilder_.isEmpty()) { + 
pruningBuilder_.dispose(); + pruningBuilder_ = null; + pruning_ = other.pruning_; + bitField0_ = (bitField0_ & ~0x00000002); + pruningBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getPruningFieldBuilder() : null; + } else { + pruningBuilder_.addAllMessages(other.pruning_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasData()) { + + return false; + } + if (!getData().isInitialized()) { + + return false; + } + for (int i = 0; i < getPruningCount(); i++) { + if (!getPruning(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.OtherMessage data = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> dataBuilder_; + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required 
.akka.cluster.ddata.OtherMessage data = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getData() { + if (dataBuilder_ == null) { + return data_; + } else { + return dataBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public Builder setData(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (dataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + data_ = value; + onChanged(); + } else { + dataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public Builder setData( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder builderForValue) { + if (dataBuilder_ == null) { + data_ = builderForValue.build(); + onChanged(); + } else { + dataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public Builder mergeData(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage value) { + if (dataBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + data_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) { + data_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder(data_).mergeFrom(value).buildPartial(); + } else { + data_ = value; + } + onChanged(); + } else { + dataBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public Builder clearData() { + if (dataBuilder_ == null) { + data_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + onChanged(); + } else { + dataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required 
.akka.cluster.ddata.OtherMessage data = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder getDataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getDataFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder getDataOrBuilder() { + if (dataBuilder_ != null) { + return dataBuilder_.getMessageOrBuilder(); + } else { + return data_; + } + } + /** + * required .akka.cluster.ddata.OtherMessage data = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder> + getDataFieldBuilder() { + if (dataBuilder_ == null) { + dataBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder>( + data_, + getParentForChildren(), + isClean()); + data_ = null; + } + return dataBuilder_; + } + + // repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + private java.util.List pruning_ = + java.util.Collections.emptyList(); + private void ensurePruningIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + pruning_ = new java.util.ArrayList(pruning_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder> pruningBuilder_; + + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry 
pruning = 2; + */ + public java.util.List getPruningList() { + if (pruningBuilder_ == null) { + return java.util.Collections.unmodifiableList(pruning_); + } else { + return pruningBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public int getPruningCount() { + if (pruningBuilder_ == null) { + return pruning_.size(); + } else { + return pruningBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry getPruning(int index) { + if (pruningBuilder_ == null) { + return pruning_.get(index); + } else { + return pruningBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder setPruning( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry value) { + if (pruningBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePruningIsMutable(); + pruning_.set(index, value); + onChanged(); + } else { + pruningBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder setPruning( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder builderForValue) { + if (pruningBuilder_ == null) { + ensurePruningIsMutable(); + pruning_.set(index, builderForValue.build()); + onChanged(); + } else { + pruningBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder addPruning(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry value) { + if (pruningBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePruningIsMutable(); + 
pruning_.add(value); + onChanged(); + } else { + pruningBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder addPruning( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry value) { + if (pruningBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePruningIsMutable(); + pruning_.add(index, value); + onChanged(); + } else { + pruningBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder addPruning( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder builderForValue) { + if (pruningBuilder_ == null) { + ensurePruningIsMutable(); + pruning_.add(builderForValue.build()); + onChanged(); + } else { + pruningBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder addPruning( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder builderForValue) { + if (pruningBuilder_ == null) { + ensurePruningIsMutable(); + pruning_.add(index, builderForValue.build()); + onChanged(); + } else { + pruningBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder addAllPruning( + java.lang.Iterable values) { + if (pruningBuilder_ == null) { + ensurePruningIsMutable(); + super.addAll(values, pruning_); + onChanged(); + } else { + pruningBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder clearPruning() { + if (pruningBuilder_ == null) { + pruning_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & 
~0x00000002); + onChanged(); + } else { + pruningBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public Builder removePruning(int index) { + if (pruningBuilder_ == null) { + ensurePruningIsMutable(); + pruning_.remove(index); + onChanged(); + } else { + pruningBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder getPruningBuilder( + int index) { + return getPruningFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder getPruningOrBuilder( + int index) { + if (pruningBuilder_ == null) { + return pruning_.get(index); } else { + return pruningBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public java.util.List + getPruningOrBuilderList() { + if (pruningBuilder_ != null) { + return pruningBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(pruning_); + } + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder addPruningBuilder() { + return getPruningFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder addPruningBuilder( + int index) { + return getPruningFieldBuilder().addBuilder( + index, 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.DataEnvelope.PruningEntry pruning = 2; + */ + public java.util.List + getPruningBuilderList() { + return getPruningFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder> + getPruningFieldBuilder() { + if (pruningBuilder_ == null) { + pruningBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PruningEntryOrBuilder>( + pruning_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + pruning_ = null; + } + return pruningBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.DataEnvelope) + } + + static { + defaultInstance = new DataEnvelope(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.DataEnvelope) + } + + public interface StatusOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 chunk = 1; + /** + * required uint32 chunk = 1; + */ + boolean hasChunk(); + /** + * required uint32 chunk = 1; + */ + int getChunk(); + + // required uint32 totChunks = 2; + /** + * required uint32 totChunks = 2; + */ + boolean hasTotChunks(); + /** + * required uint32 totChunks = 2; + */ + int getTotChunks(); + + // repeated .akka.cluster.ddata.Status.Entry entries = 3; + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + java.util.List + getEntriesList(); + /** + * repeated 
.akka.cluster.ddata.Status.Entry entries = 3; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + java.util.List + getEntriesOrBuilderList(); + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.Status} + */ + public static final class Status extends + com.google.protobuf.GeneratedMessage + implements StatusOrBuilder { + // Use Status.newBuilder() to construct. + private Status(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Status(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Status defaultInstance; + public static Status getDefaultInstance() { + return defaultInstance; + } + + public Status getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Status( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; 
+ } + break; + } + case 8: { + bitField0_ |= 0x00000001; + chunk_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + totChunks_ = input.readUInt32(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Status parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Status(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required bytes digest = 2; + /** + * required bytes digest = 2; + */ + boolean hasDigest(); + /** + * required bytes digest = 2; + */ + com.google.protobuf.ByteString getDigest(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Status.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + 
extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + digest_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public 
java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required bytes digest = 2; + public static final int DIGEST_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString digest_; + /** + * required bytes digest = 2; + */ + public boolean hasDigest() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes digest = 2; + */ + public com.google.protobuf.ByteString getDigest() { + return digest_; + } + + private void initFields() { + key_ = ""; + digest_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDigest()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, digest_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = 
memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, digest_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Status.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + digest_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.digest_ = digest_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasDigest()) { + setDigest(other.getDigest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasDigest()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required bytes digest = 2; + private com.google.protobuf.ByteString digest_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes digest = 2; + */ + public boolean hasDigest() { + return ((bitField0_ & 0x00000002) 
== 0x00000002); + } + /** + * required bytes digest = 2; + */ + public com.google.protobuf.ByteString getDigest() { + return digest_; + } + /** + * required bytes digest = 2; + */ + public Builder setDigest(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + digest_ = value; + onChanged(); + return this; + } + /** + * required bytes digest = 2; + */ + public Builder clearDigest() { + bitField0_ = (bitField0_ & ~0x00000002); + digest_ = getDefaultInstance().getDigest(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Status.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Status.Entry) + } + + private int bitField0_; + // required uint32 chunk = 1; + public static final int CHUNK_FIELD_NUMBER = 1; + private int chunk_; + /** + * required uint32 chunk = 1; + */ + public boolean hasChunk() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 chunk = 1; + */ + public int getChunk() { + return chunk_; + } + + // required uint32 totChunks = 2; + public static final int TOTCHUNKS_FIELD_NUMBER = 2; + private int totChunks_; + /** + * required uint32 totChunks = 2; + */ + public boolean hasTotChunks() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 totChunks = 2; + */ + public int getTotChunks() { + return totChunks_; + } + + // repeated .akka.cluster.ddata.Status.Entry entries = 3; + public static final int ENTRIES_FIELD_NUMBER = 3; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated 
.akka.cluster.ddata.Status.Entry entries = 3; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry getEntries(int index) { + return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + chunk_ = 0; + totChunks_ = 0; + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasChunk()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTotChunks()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, chunk_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, totChunks_); + } + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(3, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, chunk_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += 
com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, totChunks_); + } + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, entries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Status} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + chunk_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + totChunks_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + entriesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Status_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.chunk_ = chunk_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.totChunks_ = totChunks_; + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.entries_ = entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.getDefaultInstance()) return this; + if (other.hasChunk()) { + setChunk(other.getChunk()); + } + if (other.hasTotChunks()) { + setTotChunks(other.getTotChunks()); + } + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000004); + entriesBuilder_ = + 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasChunk()) { + + return false; + } + if (!hasTotChunks()) { + + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint32 chunk = 1; + private int chunk_ ; + /** + * required uint32 chunk = 1; + */ + public boolean hasChunk() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 chunk = 1; + */ + public int getChunk() { + return chunk_; + } + /** + * required uint32 chunk = 1; + */ + public Builder setChunk(int value) { + bitField0_ |= 0x00000001; + chunk_ = value; + onChanged(); + return this; + } + /** + * required uint32 chunk = 1; + */ + public Builder clearChunk() { + bitField0_ = (bitField0_ & ~0x00000001); + chunk_ = 0; + onChanged(); + return this; + } + + // required uint32 totChunks = 2; + private int totChunks_ ; + /** + * required uint32 totChunks = 2; + */ + public boolean hasTotChunks() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 totChunks = 2; + */ + 
public int getTotChunks() { + return totChunks_; + } + /** + * required uint32 totChunks = 2; + */ + public Builder setTotChunks(int value) { + bitField0_ |= 0x00000002; + totChunks_ = value; + onChanged(); + return this; + } + /** + * required uint32 totChunks = 2; + */ + public Builder clearTotChunks() { + bitField0_ = (bitField0_ & ~0x00000002); + totChunks_ = 0; + onChanged(); + return this; + } + + // repeated .akka.cluster.ddata.Status.Entry entries = 3; + private java.util.List entries_ = + java.util.Collections.emptyList(); + private void ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry value) { + if (entriesBuilder_ == null) { 
+ if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder addEntries( + int index, 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder clearEntries() { + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder getEntriesBuilder( + int index) { + return getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { 
+ return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.Status.Entry entries = 3; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Status.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Status) + } + + static { + defaultInstance = new Status(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Status) + } + + public interface GossipOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool 
sendBack = 1; + /** + * required bool sendBack = 1; + */ + boolean hasSendBack(); + /** + * required bool sendBack = 1; + */ + boolean getSendBack(); + + // repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + java.util.List + getEntriesList(); + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry getEntries(int index); + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + int getEntriesCount(); + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + java.util.List + getEntriesOrBuilderList(); + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder getEntriesOrBuilder( + int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.Gossip} + */ + public static final class Gossip extends + com.google.protobuf.GeneratedMessage + implements GossipOrBuilder { + // Use Gossip.newBuilder() to construct. 
+ private Gossip(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Gossip(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Gossip defaultInstance; + public static Gossip getDefaultInstance() { + return defaultInstance; + } + + public Gossip getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Gossip( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + sendBack_ = input.readBool(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + entries_.add(input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = 
java.util.Collections.unmodifiableList(entries_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Gossip parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Gossip(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required .akka.cluster.ddata.DataEnvelope envelope = 2; + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + boolean hasEnvelope(); + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope(); + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Gossip.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = envelope_.toBuilder(); + } + envelope_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.PARSER, extensionRegistry); + if 
(subBuilder != null) { + subBuilder.mergeFrom(envelope_); + envelope_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof 
java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .akka.cluster.ddata.DataEnvelope envelope = 2; + public static final int ENVELOPE_FIELD_NUMBER = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope envelope_; + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope() { + return envelope_; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder() { + return envelope_; + } + + private void initFields() { + key_ = ""; + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasEnvelope()) { + memoizedIsInitialized = 0; + return false; + } + if (!getEnvelope().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, envelope_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, envelope_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Gossip.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEnvelopeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (envelopeBuilder_ == null) { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_Entry_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (envelopeBuilder_ == null) { + result.envelope_ = envelope_; + } else { + result.envelope_ = envelopeBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); 
+ } + if (other.hasEnvelope()) { + mergeEnvelope(other.getEnvelope()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasEnvelope()) { + + return false; + } + if (!getEnvelope().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required .akka.cluster.ddata.DataEnvelope envelope = 2; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder> envelopeBuilder_; + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public boolean hasEnvelope() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope getEnvelope() { + if (envelopeBuilder_ == null) { + return envelope_; + } else { + return envelopeBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder setEnvelope(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope value) { + if (envelopeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + envelope_ = value; + onChanged(); + } else { + envelopeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder 
setEnvelope( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder builderForValue) { + if (envelopeBuilder_ == null) { + envelope_ = builderForValue.build(); + onChanged(); + } else { + envelopeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder mergeEnvelope(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope value) { + if (envelopeBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + envelope_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance()) { + envelope_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.newBuilder(envelope_).mergeFrom(value).buildPartial(); + } else { + envelope_ = value; + } + onChanged(); + } else { + envelopeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public Builder clearEnvelope() { + if (envelopeBuilder_ == null) { + envelope_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.getDefaultInstance(); + onChanged(); + } else { + envelopeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder getEnvelopeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getEnvelopeFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder getEnvelopeOrBuilder() { + if (envelopeBuilder_ != null) { + return envelopeBuilder_.getMessageOrBuilder(); + } else { + return envelope_; + } + } + /** + * required .akka.cluster.ddata.DataEnvelope envelope = 2; + */ + private 
com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder> + getEnvelopeFieldBuilder() { + if (envelopeBuilder_ == null) { + envelopeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelope.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.DataEnvelopeOrBuilder>( + envelope_, + getParentForChildren(), + isClean()); + envelope_ = null; + } + return envelopeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Gossip.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Gossip.Entry) + } + + private int bitField0_; + // required bool sendBack = 1; + public static final int SENDBACK_FIELD_NUMBER = 1; + private boolean sendBack_; + /** + * required bool sendBack = 1; + */ + public boolean hasSendBack() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool sendBack = 1; + */ + public boolean getSendBack() { + return sendBack_; + } + + // repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + public static final int ENTRIES_FIELD_NUMBER = 2; + private java.util.List entries_; + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public java.util.List getEntriesList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + return entries_; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public int getEntriesCount() { + return entries_.size(); + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry getEntries(int index) { + return entries_.get(index); + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder getEntriesOrBuilder( + int index) { + return entries_.get(index); + } + + private void initFields() { + sendBack_ = false; + entries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSendBack()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, sendBack_); + } + for (int i = 0; i < entries_.size(); i++) { + output.writeMessage(2, entries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, sendBack_); + } + for (int i = 0; i < entries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, entries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom( 
+ com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Gossip} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.GossipOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getEntriesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + sendBack_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + entriesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Gossip_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.sendBack_ = sendBack_; + if (entriesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = java.util.Collections.unmodifiableList(entries_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.entries_ = 
entries_; + } else { + result.entries_ = entriesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.getDefaultInstance()) return this; + if (other.hasSendBack()) { + setSendBack(other.getSendBack()); + } + if (entriesBuilder_ == null) { + if (!other.entries_.isEmpty()) { + if (entries_.isEmpty()) { + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureEntriesIsMutable(); + entries_.addAll(other.entries_); + } + onChanged(); + } + } else { + if (!other.entries_.isEmpty()) { + if (entriesBuilder_.isEmpty()) { + entriesBuilder_.dispose(); + entriesBuilder_ = null; + entries_ = other.entries_; + bitField0_ = (bitField0_ & ~0x00000002); + entriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getEntriesFieldBuilder() : null; + } else { + entriesBuilder_.addAllMessages(other.entries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSendBack()) { + + return false; + } + for (int i = 0; i < getEntriesCount(); i++) { + if (!getEntries(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool sendBack = 1; + private boolean sendBack_ ; + /** + * required bool sendBack = 1; + */ + public boolean hasSendBack() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool sendBack = 1; + */ + public boolean getSendBack() { + return sendBack_; + } + /** + * required bool sendBack = 1; + */ + public Builder setSendBack(boolean value) { + bitField0_ |= 0x00000001; + sendBack_ = value; + onChanged(); + return this; + } + /** + * required bool sendBack = 1; + */ + public Builder clearSendBack() { + bitField0_ = (bitField0_ & ~0x00000001); + sendBack_ = false; + onChanged(); + return this; + } + + // repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + private java.util.List entries_ = + java.util.Collections.emptyList(); + private void ensureEntriesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + entries_ = new java.util.ArrayList(entries_); + bitField0_ |= 0x00000002; + } + } + 
+ private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder> entriesBuilder_; + + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public java.util.List getEntriesList() { + if (entriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(entries_); + } else { + return entriesBuilder_.getMessageList(); + } + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public int getEntriesCount() { + if (entriesBuilder_ == null) { + return entries_.size(); + } else { + return entriesBuilder_.getCount(); + } + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry getEntries(int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); + } else { + return entriesBuilder_.getMessage(index); + } + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.set(index, value); + onChanged(); + } else { + entriesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder setEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.set(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public 
Builder addEntries(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(value); + onChanged(); + } else { + entriesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry value) { + if (entriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntriesIsMutable(); + entries_.add(index, value); + onChanged(); + } else { + entriesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder addEntries( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder addEntries( + int index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder builderForValue) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.add(index, builderForValue.build()); + onChanged(); + } else { + entriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder addAllEntries( + java.lang.Iterable values) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + super.addAll(values, entries_); + onChanged(); + } else { + entriesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder clearEntries() 
{ + if (entriesBuilder_ == null) { + entries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + entriesBuilder_.clear(); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public Builder removeEntries(int index) { + if (entriesBuilder_ == null) { + ensureEntriesIsMutable(); + entries_.remove(index); + onChanged(); + } else { + entriesBuilder_.remove(index); + } + return this; + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder getEntriesBuilder( + int index) { + return getEntriesFieldBuilder().getBuilder(index); + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder getEntriesOrBuilder( + int index) { + if (entriesBuilder_ == null) { + return entries_.get(index); } else { + return entriesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public java.util.List + getEntriesOrBuilderList() { + if (entriesBuilder_ != null) { + return entriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(entries_); + } + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder addEntriesBuilder() { + return getEntriesFieldBuilder().addBuilder( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.getDefaultInstance()); + } + /** + * repeated .akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder addEntriesBuilder( + int index) { + return getEntriesFieldBuilder().addBuilder( + index, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.getDefaultInstance()); + } + /** + * repeated 
.akka.cluster.ddata.Gossip.Entry entries = 2; + */ + public java.util.List + getEntriesBuilderList() { + return getEntriesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder> + getEntriesFieldBuilder() { + if (entriesBuilder_ == null) { + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.Entry.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Gossip.EntryOrBuilder>( + entries_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + entries_ = null; + } + return entriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Gossip) + } + + static { + defaultInstance = new Gossip(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Gossip) + } + + public interface UniqueAddressOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .akka.cluster.ddata.Address address = 1; + /** + * required .akka.cluster.ddata.Address address = 1; + */ + boolean hasAddress(); + /** + * required .akka.cluster.ddata.Address address = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getAddress(); + /** + * required .akka.cluster.ddata.Address address = 1; + */ + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder getAddressOrBuilder(); + + // required sfixed32 uid = 2; + /** + * required sfixed32 uid = 2; + */ + boolean hasUid(); + /** + * required sfixed32 uid = 2; + */ + int getUid(); + } + /** + * Protobuf type {@code akka.cluster.ddata.UniqueAddress} + */ + public static final class UniqueAddress extends + 
com.google.protobuf.GeneratedMessage + implements UniqueAddressOrBuilder { + // Use UniqueAddress.newBuilder() to construct. + private UniqueAddress(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UniqueAddress(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UniqueAddress defaultInstance; + public static UniqueAddress getDefaultInstance() { + return defaultInstance; + } + + public UniqueAddress getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UniqueAddress( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = address_.toBuilder(); + } + address_ = input.readMessage(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(address_); + address_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 21: { + bitField0_ |= 0x00000002; + uid_ = input.readSFixed32(); + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_UniqueAddress_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_UniqueAddress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UniqueAddress parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UniqueAddress(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .akka.cluster.ddata.Address address = 1; + public static final int ADDRESS_FIELD_NUMBER = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address address_; + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public boolean hasAddress() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getAddress() { + return address_; + } + /** + 
* required .akka.cluster.ddata.Address address = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder getAddressOrBuilder() { + return address_; + } + + // required sfixed32 uid = 2; + public static final int UID_FIELD_NUMBER = 2; + private int uid_; + /** + * required sfixed32 uid = 2; + */ + public boolean hasUid() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required sfixed32 uid = 2; + */ + public int getUid() { + return uid_; + } + + private void initFields() { + address_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance(); + uid_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAddress()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasUid()) { + memoizedIsInitialized = 0; + return false; + } + if (!getAddress().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, address_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSFixed32(2, uid_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, address_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSFixed32Size(2, uid_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private 
static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseDelimitedFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.UniqueAddress} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_UniqueAddress_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_UniqueAddress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAddressFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (addressBuilder_ == null) { + address_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance(); + } else { + addressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + uid_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_UniqueAddress_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress result = new 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (addressBuilder_ == null) { + result.address_ = address_; + } else { + result.address_ = addressBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.uid_ = uid_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress.getDefaultInstance()) return this; + if (other.hasAddress()) { + mergeAddress(other.getAddress()); + } + if (other.hasUid()) { + setUid(other.getUid()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAddress()) { + + return false; + } + if (!hasUid()) { + + return false; + } + if (!getAddress().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.UniqueAddress) e.getUnfinishedMessage(); + throw e; + } finally 
{ + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .akka.cluster.ddata.Address address = 1; + private akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address address_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder> addressBuilder_; + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public boolean hasAddress() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getAddress() { + if (addressBuilder_ == null) { + return address_; + } else { + return addressBuilder_.getMessage(); + } + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public Builder setAddress(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address value) { + if (addressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + address_ = value; + onChanged(); + } else { + addressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public Builder setAddress( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder builderForValue) { + if (addressBuilder_ == null) { + address_ = builderForValue.build(); + onChanged(); + } else { + addressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public Builder mergeAddress(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address value) { + if (addressBuilder_ == null) { + if (((bitField0_ 
& 0x00000001) == 0x00000001) && + address_ != akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance()) { + address_ = + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.newBuilder(address_).mergeFrom(value).buildPartial(); + } else { + address_ = value; + } + onChanged(); + } else { + addressBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public Builder clearAddress() { + if (addressBuilder_ == null) { + address_ = akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance(); + onChanged(); + } else { + addressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder getAddressBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getAddressFieldBuilder().getBuilder(); + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder getAddressOrBuilder() { + if (addressBuilder_ != null) { + return addressBuilder_.getMessageOrBuilder(); + } else { + return address_; + } + } + /** + * required .akka.cluster.ddata.Address address = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder> + getAddressFieldBuilder() { + if (addressBuilder_ == null) { + addressBuilder_ = new com.google.protobuf.SingleFieldBuilder< + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder>( + address_, + getParentForChildren(), + isClean()); + address_ = 
null; + } + return addressBuilder_; + } + + // required sfixed32 uid = 2; + private int uid_ ; + /** + * required sfixed32 uid = 2; + */ + public boolean hasUid() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required sfixed32 uid = 2; + */ + public int getUid() { + return uid_; + } + /** + * required sfixed32 uid = 2; + */ + public Builder setUid(int value) { + bitField0_ |= 0x00000002; + uid_ = value; + onChanged(); + return this; + } + /** + * required sfixed32 uid = 2; + */ + public Builder clearUid() { + bitField0_ = (bitField0_ & ~0x00000002); + uid_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.UniqueAddress) + } + + static { + defaultInstance = new UniqueAddress(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.UniqueAddress) + } + + public interface AddressOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string hostname = 1; + /** + * required string hostname = 1; + */ + boolean hasHostname(); + /** + * required string hostname = 1; + */ + java.lang.String getHostname(); + /** + * required string hostname = 1; + */ + com.google.protobuf.ByteString + getHostnameBytes(); + + // required uint32 port = 2; + /** + * required uint32 port = 2; + */ + boolean hasPort(); + /** + * required uint32 port = 2; + */ + int getPort(); + } + /** + * Protobuf type {@code akka.cluster.ddata.Address} + */ + public static final class Address extends + com.google.protobuf.GeneratedMessage + implements AddressOrBuilder { + // Use Address.newBuilder() to construct. 
+ private Address(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Address(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Address defaultInstance; + public static Address getDefaultInstance() { + return defaultInstance; + } + + public Address getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Address( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + hostname_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + port_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Address_descriptor; + } 
+ + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Address_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder.class); + } + + public static com.google.protobuf.Parser
PARSER = + new com.google.protobuf.AbstractParser
() { + public Address parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Address(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser
getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string hostname = 1; + public static final int HOSTNAME_FIELD_NUMBER = 1; + private java.lang.Object hostname_; + /** + * required string hostname = 1; + */ + public boolean hasHostname() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string hostname = 1; + */ + public java.lang.String getHostname() { + java.lang.Object ref = hostname_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + hostname_ = s; + } + return s; + } + } + /** + * required string hostname = 1; + */ + public com.google.protobuf.ByteString + getHostnameBytes() { + java.lang.Object ref = hostname_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hostname_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint32 port = 2; + public static final int PORT_FIELD_NUMBER = 2; + private int port_; + /** + * required uint32 port = 2; + */ + public boolean hasPort() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 port = 2; + */ + public int getPort() { + return port_; + } + + private void initFields() { + hostname_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasHostname()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + 
if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getHostnameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getHostnameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.Address} + */ + 
public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.AddressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Address_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Address_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + hostname_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_Address_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance(); + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.hostname_ = hostname_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address.getDefaultInstance()) return this; + if (other.hasHostname()) { + bitField0_ |= 0x00000001; + hostname_ = other.hostname_; + onChanged(); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasHostname()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address parsedMessage = 
null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.Address) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string hostname = 1; + private java.lang.Object hostname_ = ""; + /** + * required string hostname = 1; + */ + public boolean hasHostname() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string hostname = 1; + */ + public java.lang.String getHostname() { + java.lang.Object ref = hostname_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hostname_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string hostname = 1; + */ + public com.google.protobuf.ByteString + getHostnameBytes() { + java.lang.Object ref = hostname_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hostname_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string hostname = 1; + */ + public Builder setHostname( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostname_ = value; + onChanged(); + return this; + } + /** + * required string hostname = 1; + */ + public Builder clearHostname() { + bitField0_ = (bitField0_ & ~0x00000001); + hostname_ = getDefaultInstance().getHostname(); + onChanged(); + return this; + } + /** + * required string hostname = 1; + */ + public Builder setHostnameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostname_ = 
value; + onChanged(); + return this; + } + + // required uint32 port = 2; + private int port_ ; + /** + * required uint32 port = 2; + */ + public boolean hasPort() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 port = 2; + */ + public int getPort() { + return port_; + } + /** + * required uint32 port = 2; + */ + public Builder setPort(int value) { + bitField0_ |= 0x00000002; + port_ = value; + onChanged(); + return this; + } + /** + * required uint32 port = 2; + */ + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000002); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.Address) + } + + static { + defaultInstance = new Address(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.Address) + } + + public interface OtherMessageOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes enclosedMessage = 1; + /** + * required bytes enclosedMessage = 1; + */ + boolean hasEnclosedMessage(); + /** + * required bytes enclosedMessage = 1; + */ + com.google.protobuf.ByteString getEnclosedMessage(); + + // required int32 serializerId = 2; + /** + * required int32 serializerId = 2; + */ + boolean hasSerializerId(); + /** + * required int32 serializerId = 2; + */ + int getSerializerId(); + + // optional bytes messageManifest = 4; + /** + * optional bytes messageManifest = 4; + */ + boolean hasMessageManifest(); + /** + * optional bytes messageManifest = 4; + */ + com.google.protobuf.ByteString getMessageManifest(); + } + /** + * Protobuf type {@code akka.cluster.ddata.OtherMessage} + */ + public static final class OtherMessage extends + com.google.protobuf.GeneratedMessage + implements OtherMessageOrBuilder { + // Use OtherMessage.newBuilder() to construct. 
+ private OtherMessage(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private OtherMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final OtherMessage defaultInstance; + public static OtherMessage getDefaultInstance() { + return defaultInstance; + } + + public OtherMessage getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OtherMessage( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + enclosedMessage_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + serializerId_ = input.readInt32(); + break; + } + case 34: { + bitField0_ |= 0x00000004; + messageManifest_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor 
+ getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_OtherMessage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_OtherMessage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public OtherMessage parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OtherMessage(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes enclosedMessage = 1; + public static final int ENCLOSEDMESSAGE_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString enclosedMessage_; + /** + * required bytes enclosedMessage = 1; + */ + public boolean hasEnclosedMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes enclosedMessage = 1; + */ + public com.google.protobuf.ByteString getEnclosedMessage() { + return enclosedMessage_; + } + + // required int32 serializerId = 2; + public static final int SERIALIZERID_FIELD_NUMBER = 2; + private int serializerId_; + /** + * required int32 serializerId = 2; + */ + public boolean hasSerializerId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 serializerId = 2; + */ + public int getSerializerId() { + return serializerId_; + } + + // optional bytes messageManifest = 4; + public static final int MESSAGEMANIFEST_FIELD_NUMBER = 
4; + private com.google.protobuf.ByteString messageManifest_; + /** + * optional bytes messageManifest = 4; + */ + public boolean hasMessageManifest() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bytes messageManifest = 4; + */ + public com.google.protobuf.ByteString getMessageManifest() { + return messageManifest_; + } + + private void initFields() { + enclosedMessage_ = com.google.protobuf.ByteString.EMPTY; + serializerId_ = 0; + messageManifest_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEnclosedMessage()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSerializerId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, enclosedMessage_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt32(2, serializerId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(4, messageManifest_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, enclosedMessage_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, serializerId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, messageManifest_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.OtherMessage} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessageOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_OtherMessage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_OtherMessage_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + enclosedMessage_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + serializerId_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + messageManifest_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_OtherMessage_descriptor; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage result = new 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.enclosedMessage_ = enclosedMessage_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.serializerId_ = serializerId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.messageManifest_ = messageManifest_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage.getDefaultInstance()) return this; + if (other.hasEnclosedMessage()) { + setEnclosedMessage(other.getEnclosedMessage()); + } + if (other.hasSerializerId()) { + setSerializerId(other.getSerializerId()); + } + if (other.hasMessageManifest()) { + setMessageManifest(other.getMessageManifest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEnclosedMessage()) { + + return false; + } + if (!hasSerializerId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes enclosedMessage = 1; + private com.google.protobuf.ByteString enclosedMessage_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes enclosedMessage = 1; + */ + public boolean hasEnclosedMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes enclosedMessage = 1; + */ + public com.google.protobuf.ByteString getEnclosedMessage() { + return enclosedMessage_; + } + /** + * required bytes enclosedMessage = 1; + */ + public Builder setEnclosedMessage(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + enclosedMessage_ = value; + onChanged(); + return this; + } + /** + * required bytes enclosedMessage = 1; + */ + public Builder clearEnclosedMessage() { + bitField0_ = (bitField0_ & ~0x00000001); + enclosedMessage_ = getDefaultInstance().getEnclosedMessage(); + onChanged(); + return this; + } + + // required int32 serializerId = 2; + private int serializerId_ ; + /** + * required int32 serializerId = 2; + */ + public boolean hasSerializerId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 serializerId = 2; + */ + public int getSerializerId() { + return serializerId_; + } + /** + * required int32 serializerId = 2; + */ + public Builder setSerializerId(int value) { + bitField0_ |= 0x00000002; + serializerId_ = value; + onChanged(); + return this; + } + /** + * required int32 serializerId = 2; + */ + public Builder clearSerializerId() { + bitField0_ = (bitField0_ & ~0x00000002); + serializerId_ = 0; + onChanged(); + return this; + } + + // optional bytes messageManifest = 4; + private 
com.google.protobuf.ByteString messageManifest_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes messageManifest = 4; + */ + public boolean hasMessageManifest() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bytes messageManifest = 4; + */ + public com.google.protobuf.ByteString getMessageManifest() { + return messageManifest_; + } + /** + * optional bytes messageManifest = 4; + */ + public Builder setMessageManifest(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + messageManifest_ = value; + onChanged(); + return this; + } + /** + * optional bytes messageManifest = 4; + */ + public Builder clearMessageManifest() { + bitField0_ = (bitField0_ & ~0x00000004); + messageManifest_ = getDefaultInstance().getMessageManifest(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.OtherMessage) + } + + static { + defaultInstance = new OtherMessage(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.OtherMessage) + } + + public interface StringGSetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string elements = 1; + /** + * repeated string elements = 1; + */ + java.util.List + getElementsList(); + /** + * repeated string elements = 1; + */ + int getElementsCount(); + /** + * repeated string elements = 1; + */ + java.lang.String getElements(int index); + /** + * repeated string elements = 1; + */ + com.google.protobuf.ByteString + getElementsBytes(int index); + } + /** + * Protobuf type {@code akka.cluster.ddata.StringGSet} + */ + public static final class StringGSet extends + com.google.protobuf.GeneratedMessage + implements StringGSetOrBuilder { + // Use StringGSet.newBuilder() to construct. 
+ private StringGSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StringGSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StringGSet defaultInstance; + public static StringGSet getDefaultInstance() { + return defaultInstance; + } + + public StringGSet getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StringGSet( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + elements_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + elements_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + elements_ = new com.google.protobuf.UnmodifiableLazyStringList(elements_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + 
} + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_StringGSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_StringGSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StringGSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StringGSet(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string elements = 1; + public static final int ELEMENTS_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList elements_; + /** + * repeated string elements = 1; + */ + public java.util.List + getElementsList() { + return elements_; + } + /** + * repeated string elements = 1; + */ + public int getElementsCount() { + return elements_.size(); + } + /** + * repeated string elements = 1; + */ + public java.lang.String getElements(int index) { + return elements_.get(index); + } + /** + * repeated string elements = 1; + */ + public com.google.protobuf.ByteString + getElementsBytes(int index) { + return elements_.getByteString(index); + } + + private void initFields() { + elements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if 
(isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < elements_.size(); i++) { + output.writeBytes(1, elements_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < elements_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(elements_.getByteString(i)); + } + size += dataSize; + size += 1 * getElementsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code akka.cluster.ddata.StringGSet} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_StringGSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_StringGSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.class, akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.Builder.class); + } + + // Construct using akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + elements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.internal_static_akka_cluster_ddata_StringGSet_descriptor; + } + + public 
akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet getDefaultInstanceForType() { + return akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.getDefaultInstance(); + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet build() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet buildPartial() { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet result = new akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + elements_ = new com.google.protobuf.UnmodifiableLazyStringList( + elements_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.elements_ = elements_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet) { + return mergeFrom((akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet other) { + if (other == akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet.getDefaultInstance()) return this; + if (!other.elements_.isEmpty()) { + if (elements_.isEmpty()) { + elements_ = other.elements_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureElementsIsMutable(); + elements_.addAll(other.elements_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.ddata.protobuf.msg.ReplicatorMessages.StringGSet) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string elements = 1; + private com.google.protobuf.LazyStringList elements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureElementsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + elements_ = new com.google.protobuf.LazyStringArrayList(elements_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string elements = 1; + */ + public java.util.List + getElementsList() { + return java.util.Collections.unmodifiableList(elements_); + } + /** + * repeated string elements = 1; + */ + public int getElementsCount() { + return elements_.size(); + } + /** + * repeated string elements = 1; + */ + public java.lang.String getElements(int index) { + return elements_.get(index); + } + /** + * repeated string elements = 1; + */ + public com.google.protobuf.ByteString + getElementsBytes(int index) { + return elements_.getByteString(index); + } + /** + * repeated string elements = 1; + */ + public Builder setElements( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureElementsIsMutable(); + elements_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string elements = 1; + */ + public Builder addElements( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureElementsIsMutable(); + elements_.add(value); + onChanged(); + return this; + } + /** + * repeated string elements = 1; + */ + 
public Builder addAllElements( + java.lang.Iterable values) { + ensureElementsIsMutable(); + super.addAll(values, elements_); + onChanged(); + return this; + } + /** + * repeated string elements = 1; + */ + public Builder clearElements() { + elements_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string elements = 1; + */ + public Builder addElementsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureElementsIsMutable(); + elements_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:akka.cluster.ddata.StringGSet) + } + + static { + defaultInstance = new StringGSet(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:akka.cluster.ddata.StringGSet) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Get_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Get_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_GetSuccess_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_GetSuccess_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_NotFound_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_NotFound_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_GetFailure_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_GetFailure_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_akka_cluster_ddata_Subscribe_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Subscribe_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Unsubscribe_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Unsubscribe_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Changed_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Changed_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Write_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Write_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Empty_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Empty_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Read_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Read_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_ReadResult_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_ReadResult_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_DataEnvelope_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_akka_cluster_ddata_DataEnvelope_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Status_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Status_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Status_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Status_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Gossip_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Gossip_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Gossip_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Gossip_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_UniqueAddress_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_UniqueAddress_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_Address_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_Address_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_akka_cluster_ddata_OtherMessage_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_OtherMessage_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_akka_cluster_ddata_StringGSet_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_akka_cluster_ddata_StringGSet_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\030ReplicatorMessages.proto\022\022akka.cluster" + + ".ddata\"\215\001\n\003Get\022-\n\003key\030\001 \002(\0132 .akka.clust" + + "er.ddata.OtherMessage\022\023\n\013consistency\030\002 \002" + + "(\021\022\017\n\007timeout\030\003 \002(\r\0221\n\007request\030\004 \001(\0132 .a" + + "kka.cluster.ddata.OtherMessage\"\236\001\n\nGetSu" + + "ccess\022-\n\003key\030\001 \002(\0132 .akka.cluster.ddata." + + "OtherMessage\022.\n\004data\030\002 \002(\0132 .akka.cluste" + + "r.ddata.OtherMessage\0221\n\007request\030\004 \001(\0132 ." 
+ + "akka.cluster.ddata.OtherMessage\"l\n\010NotFo" + + "und\022-\n\003key\030\001 \002(\0132 .akka.cluster.ddata.Ot", + "herMessage\0221\n\007request\030\002 \001(\0132 .akka.clust" + + "er.ddata.OtherMessage\"n\n\nGetFailure\022-\n\003k" + + "ey\030\001 \002(\0132 .akka.cluster.ddata.OtherMessa" + + "ge\0221\n\007request\030\002 \001(\0132 .akka.cluster.ddata" + + ".OtherMessage\"G\n\tSubscribe\022-\n\003key\030\001 \002(\0132" + + " .akka.cluster.ddata.OtherMessage\022\013\n\003ref" + + "\030\002 \002(\t\"I\n\013Unsubscribe\022-\n\003key\030\001 \002(\0132 .akk" + + "a.cluster.ddata.OtherMessage\022\013\n\003ref\030\002 \002(" + + "\t\"h\n\007Changed\022-\n\003key\030\001 \002(\0132 .akka.cluster" + + ".ddata.OtherMessage\022.\n\004data\030\002 \002(\0132 .akka", + ".cluster.ddata.OtherMessage\"H\n\005Write\022\013\n\003" + + "key\030\001 \002(\t\0222\n\010envelope\030\002 \002(\0132 .akka.clust" + + "er.ddata.DataEnvelope\"\007\n\005Empty\"\023\n\004Read\022\013" + + "\n\003key\030\001 \002(\t\"@\n\nReadResult\0222\n\010envelope\030\001 " + + "\001(\0132 .akka.cluster.ddata.DataEnvelope\"\301\002" + + "\n\014DataEnvelope\022.\n\004data\030\001 \002(\0132 .akka.clus" + + "ter.ddata.OtherMessage\022>\n\007pruning\030\002 \003(\0132" + + "-.akka.cluster.ddata.DataEnvelope.Prunin" + + "gEntry\032\300\001\n\014PruningEntry\0229\n\016removedAddres" + + "s\030\001 \002(\0132!.akka.cluster.ddata.UniqueAddre", + "ss\0227\n\014ownerAddress\030\002 \002(\0132!.akka.cluster." 
+ + "ddata.UniqueAddress\022\021\n\tperformed\030\003 \002(\010\022)" + + "\n\004seen\030\004 \003(\0132\033.akka.cluster.ddata.Addres" + + "s\"\203\001\n\006Status\022\r\n\005chunk\030\001 \002(\r\022\021\n\ttotChunks" + + "\030\002 \002(\r\0221\n\007entries\030\003 \003(\0132 .akka.cluster.d" + + "data.Status.Entry\032$\n\005Entry\022\013\n\003key\030\001 \002(\t\022" + + "\016\n\006digest\030\002 \002(\014\"\227\001\n\006Gossip\022\020\n\010sendBack\030\001" + + " \002(\010\0221\n\007entries\030\002 \003(\0132 .akka.cluster.dda" + + "ta.Gossip.Entry\032H\n\005Entry\022\013\n\003key\030\001 \002(\t\0222\n" + + "\010envelope\030\002 \002(\0132 .akka.cluster.ddata.Dat", + "aEnvelope\"J\n\rUniqueAddress\022,\n\007address\030\001 " + + "\002(\0132\033.akka.cluster.ddata.Address\022\013\n\003uid\030" + + "\002 \002(\017\")\n\007Address\022\020\n\010hostname\030\001 \002(\t\022\014\n\004po" + + "rt\030\002 \002(\r\"V\n\014OtherMessage\022\027\n\017enclosedMess" + + "age\030\001 \002(\014\022\024\n\014serializerId\030\002 \002(\005\022\027\n\017messa" + + "geManifest\030\004 \001(\014\"\036\n\nStringGSet\022\020\n\010elemen" + + "ts\030\001 \003(\tB#\n\037akka.cluster.ddata.protobuf." 
+ + "msgH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_akka_cluster_ddata_Get_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_akka_cluster_ddata_Get_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Get_descriptor, + new java.lang.String[] { "Key", "Consistency", "Timeout", "Request", }); + internal_static_akka_cluster_ddata_GetSuccess_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_akka_cluster_ddata_GetSuccess_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_GetSuccess_descriptor, + new java.lang.String[] { "Key", "Data", "Request", }); + internal_static_akka_cluster_ddata_NotFound_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_akka_cluster_ddata_NotFound_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_NotFound_descriptor, + new java.lang.String[] { "Key", "Request", }); + internal_static_akka_cluster_ddata_GetFailure_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_akka_cluster_ddata_GetFailure_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_GetFailure_descriptor, + new java.lang.String[] { "Key", "Request", }); + internal_static_akka_cluster_ddata_Subscribe_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_akka_cluster_ddata_Subscribe_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_akka_cluster_ddata_Subscribe_descriptor, + new java.lang.String[] { "Key", "Ref", }); + internal_static_akka_cluster_ddata_Unsubscribe_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_akka_cluster_ddata_Unsubscribe_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Unsubscribe_descriptor, + new java.lang.String[] { "Key", "Ref", }); + internal_static_akka_cluster_ddata_Changed_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_akka_cluster_ddata_Changed_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Changed_descriptor, + new java.lang.String[] { "Key", "Data", }); + internal_static_akka_cluster_ddata_Write_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_akka_cluster_ddata_Write_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Write_descriptor, + new java.lang.String[] { "Key", "Envelope", }); + internal_static_akka_cluster_ddata_Empty_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_akka_cluster_ddata_Empty_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Empty_descriptor, + new java.lang.String[] { }); + internal_static_akka_cluster_ddata_Read_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_akka_cluster_ddata_Read_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Read_descriptor, + new java.lang.String[] { "Key", }); + internal_static_akka_cluster_ddata_ReadResult_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_akka_cluster_ddata_ReadResult_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_akka_cluster_ddata_ReadResult_descriptor, + new java.lang.String[] { "Envelope", }); + internal_static_akka_cluster_ddata_DataEnvelope_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_akka_cluster_ddata_DataEnvelope_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_DataEnvelope_descriptor, + new java.lang.String[] { "Data", "Pruning", }); + internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor = + internal_static_akka_cluster_ddata_DataEnvelope_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_DataEnvelope_PruningEntry_descriptor, + new java.lang.String[] { "RemovedAddress", "OwnerAddress", "Performed", "Seen", }); + internal_static_akka_cluster_ddata_Status_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_akka_cluster_ddata_Status_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Status_descriptor, + new java.lang.String[] { "Chunk", "TotChunks", "Entries", }); + internal_static_akka_cluster_ddata_Status_Entry_descriptor = + internal_static_akka_cluster_ddata_Status_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_Status_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Status_Entry_descriptor, + new java.lang.String[] { "Key", "Digest", }); + internal_static_akka_cluster_ddata_Gossip_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_akka_cluster_ddata_Gossip_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Gossip_descriptor, + new java.lang.String[] { "SendBack", "Entries", 
}); + internal_static_akka_cluster_ddata_Gossip_Entry_descriptor = + internal_static_akka_cluster_ddata_Gossip_descriptor.getNestedTypes().get(0); + internal_static_akka_cluster_ddata_Gossip_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Gossip_Entry_descriptor, + new java.lang.String[] { "Key", "Envelope", }); + internal_static_akka_cluster_ddata_UniqueAddress_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_akka_cluster_ddata_UniqueAddress_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_UniqueAddress_descriptor, + new java.lang.String[] { "Address", "Uid", }); + internal_static_akka_cluster_ddata_Address_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_akka_cluster_ddata_Address_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_Address_descriptor, + new java.lang.String[] { "Hostname", "Port", }); + internal_static_akka_cluster_ddata_OtherMessage_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_akka_cluster_ddata_OtherMessage_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_OtherMessage_descriptor, + new java.lang.String[] { "EnclosedMessage", "SerializerId", "MessageManifest", }); + internal_static_akka_cluster_ddata_StringGSet_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_akka_cluster_ddata_StringGSet_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_akka_cluster_ddata_StringGSet_descriptor, + new java.lang.String[] { "Elements", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { 
+ }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto b/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto new file mode 100644 index 0000000000..146e227104 --- /dev/null +++ b/akka-distributed-data/src/main/protobuf/ReplicatedDataMessages.proto @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2014-2015 Typesafe Inc. + */ +package akka.cluster.ddata; + +option java_package = "akka.cluster.ddata.protobuf.msg"; +option optimize_for = SPEED; +import "ReplicatorMessages.proto"; + +message GSet { + repeated string stringElements = 1; + repeated sint32 intElements = 2 [packed=true]; + repeated sint64 longElements = 3 [packed=true]; + repeated OtherMessage otherElements = 4; +} + +message ORSet { + required VersionVector vvector = 1; + repeated VersionVector dots = 2; + repeated string stringElements = 3; + repeated sint32 intElements = 4 [packed=true]; + repeated sint64 longElements = 5 [packed=true]; + repeated OtherMessage otherElements = 6; + +} + +message Flag { + required bool enabled = 1; +} + +message LWWRegister { + required sint64 timestamp = 1; + required UniqueAddress node = 2; + required OtherMessage state = 3; +} + +message GCounter { + message Entry { + required UniqueAddress node = 1; + required bytes value = 2; + } + + repeated Entry entries = 1; +} + +message PNCounter { + required GCounter increments = 1; + required GCounter decrements = 2; +} + +message VersionVector { + message Entry { + required UniqueAddress node = 1; + required int64 version = 2; + } + repeated Entry entries = 1; +} + +message ORMap { + message Entry { + required string key = 1; + required OtherMessage value = 2; + } + + required ORSet keys = 1; + repeated Entry entries = 2; +} + +message LWWMap { + message Entry { + required string key = 1; + required LWWRegister value = 2; + } + + required ORSet keys = 1; + repeated Entry entries = 2; +} + +message PNCounterMap { + 
message Entry { + required string key = 1; + required PNCounter value = 2; + } + + required ORSet keys = 1; + repeated Entry entries = 2; +} + + + + + diff --git a/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto b/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto new file mode 100644 index 0000000000..859e379c26 --- /dev/null +++ b/akka-distributed-data/src/main/protobuf/ReplicatorMessages.proto @@ -0,0 +1,118 @@ +/** + * Copyright (C) 2014-2015 Typesafe Inc. + */ +package akka.cluster.ddata; + +option java_package = "akka.cluster.ddata.protobuf.msg"; +option optimize_for = SPEED; + +message Get { + required OtherMessage key = 1; + required sint32 consistency = 2; + required uint32 timeout = 3; + optional OtherMessage request = 4; +} + +message GetSuccess { + required OtherMessage key = 1; + required OtherMessage data = 2; + optional OtherMessage request = 4; +} + +message NotFound { + required OtherMessage key = 1; + optional OtherMessage request = 2; +} + +message GetFailure { + required OtherMessage key = 1; + optional OtherMessage request = 2; +} + +message Subscribe { + required OtherMessage key = 1; + required string ref = 2; +} + +message Unsubscribe { + required OtherMessage key = 1; + required string ref = 2; +} + +message Changed { + required OtherMessage key = 1; + required OtherMessage data = 2; +} + +message Write { + required string key = 1; + required DataEnvelope envelope = 2; +} + +// message WriteAck, via Empty + +message Empty { +} + +message Read { + required string key = 1; +} + +message ReadResult { + optional DataEnvelope envelope = 1; +} + +message DataEnvelope { + message PruningEntry { + required UniqueAddress removedAddress = 1; + required UniqueAddress ownerAddress = 2; + required bool performed = 3; + repeated Address seen = 4; + } + + required OtherMessage data = 1; + repeated PruningEntry pruning = 2; +} + +message Status { + message Entry { + required string key = 1; + required bytes digest = 2; + } + 
+ required uint32 chunk = 1; + required uint32 totChunks = 2; + repeated Entry entries = 3; +} + +message Gossip { + message Entry { + required string key = 1; + required DataEnvelope envelope = 2; + } + + required bool sendBack = 1; + repeated Entry entries = 2; +} + +message UniqueAddress { + required Address address = 1; + required sfixed32 uid = 2; +} + +message Address { + required string hostname = 1; + required uint32 port = 2; +} + +message OtherMessage { + required bytes enclosedMessage = 1; + required int32 serializerId = 2; + optional bytes messageManifest = 4; +} + +message StringGSet { + repeated string elements = 1; +} + + diff --git a/akka-distributed-data/src/main/resources/reference.conf b/akka-distributed-data/src/main/resources/reference.conf new file mode 100644 index 0000000000..8c6cd55776 --- /dev/null +++ b/akka-distributed-data/src/main/resources/reference.conf @@ -0,0 +1,61 @@ +############################################## +# Akka Distributed DataReference Config File # +############################################## + +# This is the reference config file that contains all the default settings. +# Make your edits/overrides in your application.conf. + + +#//#distributed-data +# Settings for the DistributedData extension +akka.cluster.distributed-data { + # Actor name of the Replicator actor, /system/ddataReplicator + name = ddataReplicator + + # Replicas are running on members tagged with this role. + # All members are used if undefined or empty. + role = "" + + # How often the Replicator should send out gossip information + gossip-interval = 2 s + + # How often the subscribers will be notified of changes, if any + notify-subscribers-interval = 500 ms + + # Maximum number of entries to transfer in one gossip message when synchronizing + # the replicas. Next chunk will be transferred in next round of gossip. + max-delta-elements = 1000 + + # The id of the dispatcher to use for Replicator actors. 
If not specified + # default dispatcher is used. + # If specified you need to define the settings of the actual dispatcher. + use-dispatcher = "" + + # How often the Replicator checks for pruning of data associated with + # removed cluster nodes. + pruning-interval = 30 s + + # How long it takes (worst case) to spread the data to all other replica nodes. + # This is used when initiating and completing the pruning process of data associated + # with removed cluster nodes. The time measurement is stopped when any replica is + # unreachable, so it should be configured to worst case in a healthy cluster. + max-pruning-dissemination = 60 s + +} +#//#distributed-data + +# Protobuf serializer for cluster DistributedData messages +akka.actor { + serializers { + akka-data-replication = "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer" + akka-replicated-data = "akka.cluster.ddata.protobuf.ReplicatedDataSerializer" + } + serialization-bindings { + "akka.cluster.ddata.Replicator$ReplicatorMessage" = akka-data-replication + "akka.cluster.ddata.ReplicatedDataSerialization" = akka-replicated-data + } + serialization-identifiers { + "akka.cluster.ddata.protobuf.ReplicatedDataSerializer" = 11 + "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer" = 12 + } +} diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala new file mode 100644 index 0000000000..138c62d5b9 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala @@ -0,0 +1,52 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc.
+ */ +package akka.cluster.ddata + +import scala.concurrent.duration._ + +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.ExtendedActorSystem +import akka.actor.Extension +import akka.actor.ExtensionId +import akka.actor.ExtensionIdProvider +import akka.cluster.Cluster + +object DistributedData extends ExtensionId[DistributedData] with ExtensionIdProvider { + override def get(system: ActorSystem): DistributedData = super.get(system) + + override def lookup = DistributedData + + override def createExtension(system: ExtendedActorSystem): DistributedData = + new DistributedData(system) +} + +/** + * Akka extension for convenient configuration and use of the + * [[Replicator]]. Configuration settings are defined in the + * `akka.cluster.distributed-data` section, see `reference.conf`. + */ +class DistributedData(system: ExtendedActorSystem) extends Extension { + + private val config = system.settings.config.getConfig("akka.cluster.distributed-data") + private val settings = ReplicatorSettings(config) + + /** + * Returns true if this member is not tagged with the role configured for the + * replicas. + */ + def isTerminated: Boolean = Cluster(system).isTerminated || !settings.role.forall(Cluster(system).selfRoles.contains) + + /** + * `ActorRef` of the [[Replicator]]. + */ + val replicator: ActorRef = + if (isTerminated) { + system.log.warning("Replicator points to dead letters: Make sure the cluster node is not terminated and has the proper role!") + system.deadLetters + } else { + val name = config.getString("name") + system.systemActorOf(Replicator.props(settings), name) + } +} diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala new file mode 100644 index 0000000000..233e6a66bd --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala @@ -0,0 +1,45 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc.
+ */ +package akka.cluster.ddata + +object Flag { + /** + * `Flag` that is initialized to `false`. + */ + val empty = new Flag(false) + def apply(): Flag = empty + /** + * Java API: `Flag` that is initialized to `false`. + */ + def create(): Flag = empty + + // unapply from case class +} + +/** + * Implements a boolean flag CRDT that is initialized to `false` and + * can be switched to `true`. `true` wins over `false` in merge. + * + * This class is immutable, i.e. "modifying" methods return a new instance. + */ +@SerialVersionUID(1L) +final case class Flag(enabled: Boolean) extends ReplicatedData with ReplicatedDataSerialization { + + type T = Flag + + def switchOn: Flag = + if (enabled) this + else Flag(true) + + override def merge(that: Flag): Flag = + if (that.enabled) that + else this +} + +object FlagKey { + def create(id: String): Key[Flag] = FlagKey(id) +} + +@SerialVersionUID(1L) +final case class FlagKey(_id: String) extends Key[Flag](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala new file mode 100644 index 0000000000..e6ca8c7b3c --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala @@ -0,0 +1,131 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import akka.cluster.Cluster +import akka.cluster.UniqueAddress +import java.math.BigInteger + +object GCounter { + val empty: GCounter = new GCounter + def apply(): GCounter = empty + /** + * Java API + */ + def create(): GCounter = empty + + /** + * Extract the [[GCounter#value]]. + */ + def unapply(c: GCounter): Option[BigInt] = Some(c.value) + + private val Zero = BigInt(0) +} + +/** + * Implements a 'Growing Counter' CRDT, also called a 'G-Counter'. + * + * It is described in the paper + * A comprehensive study of Convergent and Commutative Replicated Data Types. 
+ * + * A G-Counter is an increment-only counter (inspired by vector clocks) in + * which only increment and merge are possible. Incrementing the counter + * adds 1 to the count for the current node. Divergent histories are + * resolved by taking the maximum count for each node (like a vector + * clock merge). The value of the counter is the sum of all node counts. + * + * This class is immutable, i.e. "modifying" methods return a new instance. + */ +@SerialVersionUID(1L) +final class GCounter private[akka] ( + private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty) + extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { + + import GCounter.Zero + + type T = GCounter + + /** + * Scala API: Current total value of the counter. + */ + def value: BigInt = state.values.foldLeft(Zero) { (acc, v) ⇒ acc + v } + + /** + * Java API: Current total value of the counter. + */ + def getValue: BigInteger = value.bigInteger + + /** + * Increment the counter with the delta specified. + * The delta must be zero or positive. + */ + def +(delta: Long)(implicit node: Cluster): GCounter = increment(node, delta) + + /** + * Increment the counter with the delta specified. + * The delta must be zero or positive.
+ */ + def increment(node: Cluster, delta: Long = 1): GCounter = + increment(node.selfUniqueAddress, delta) + + /** + * INTERNAL API + */ + private[akka] def increment(key: UniqueAddress): GCounter = increment(key, 1) + + /** + * INTERNAL API + */ + private[akka] def increment(key: UniqueAddress, delta: BigInt): GCounter = { + require(delta >= 0, "Can't decrement a GCounter") + if (delta == 0) this + else state.get(key) match { + case Some(v) ⇒ + val tot = v + delta + new GCounter(state + (key -> tot)) + case None ⇒ new GCounter(state + (key -> delta)) + } + } + + override def merge(that: GCounter): GCounter = { + var merged = that.state + for ((key, thisValue) ← state) { + val thatValue = merged.getOrElse(key, Zero) + if (thisValue > thatValue) + merged = merged.updated(key, thisValue) + } + new GCounter(merged) + } + + override def needPruningFrom(removedNode: UniqueAddress): Boolean = + state.contains(removedNode) + + override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): GCounter = + state.get(removedNode) match { + case Some(value) ⇒ new GCounter(state - removedNode).increment(collapseInto, value) + case None ⇒ this + } + + override def pruningCleanup(removedNode: UniqueAddress): GCounter = + new GCounter(state - removedNode) + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"GCounter($value)" + + override def equals(o: Any): Boolean = o match { + case other: GCounter ⇒ state == other.state + case _ ⇒ false + } + + override def hashCode: Int = state.hashCode + +} + +object GCounterKey { + def create(id: String): Key[GCounter] = GCounterKey(id) +} + +@SerialVersionUID(1L) +final case class GCounterKey(_id: String) extends Key[GCounter](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala new file mode 100644 index 0000000000..29e5e646da --- 
/dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala @@ -0,0 +1,66 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +object GSet { + private val _empty: GSet[Any] = new GSet(Set.empty) + def empty[A]: GSet[A] = _empty.asInstanceOf[GSet[A]] + def apply(): GSet[Any] = _empty + /** + * Java API + */ + def create[A](): GSet[A] = empty[A] + + // unapply from case class +} + +/** + * Implements a 'Add Set' CRDT, also called a 'G-Set'. You can't + * remove elements of a G-Set. + * + * It is described in the paper + * A comprehensive study of Convergent and Commutative Replicated Data Types. + * + * A G-Set doesn't accumulate any garbage apart from the elements themselves. + * + * This class is immutable, i.e. "modifying" methods return a new instance. + */ +@SerialVersionUID(1L) +final case class GSet[A](elements: Set[A]) extends ReplicatedData with ReplicatedDataSerialization { + + type T = GSet[A] + + /** + * Java API + */ + def getElements(): java.util.Set[A] = { + import scala.collection.JavaConverters._ + elements.asJava + } + + def contains(a: A): Boolean = elements(a) + + def isEmpty: Boolean = elements.isEmpty + + def size: Int = elements.size + + /** + * Adds an element to the set + */ + def +(element: A): GSet[A] = add(element) + + /** + * Adds an element to the set + */ + def add(element: A): GSet[A] = copy(elements + element) + + override def merge(that: GSet[A]): GSet[A] = copy(elements ++ that.elements) +} + +object GSetKey { + def create[A](id: String): Key[GSet[A]] = GSetKey(id) +} + +@SerialVersionUID(1L) +final case class GSetKey[A](_id: String) extends Key[GSet[A]](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala new file mode 100644 index 0000000000..99fc56a94e --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala @@ -0,0 
+1,35 @@ +/** + * Copyright (C) 2015 Typesafe Inc. + */ +package akka.cluster.ddata + +object Key { + /** + * Extract the [[Key#id]]. + */ + def unapply(k: Key[_]): Option[String] = Some(k.id) + + private[akka]type KeyR = Key[ReplicatedData] + +} + +/** + * Key for the key-value data in [[Replicator]]. The type of the data value + * is defined in the key. Keys are compared equal if the `id` strings are equal, + * i.e. use unique identifiers. + * + * Specific classes are provided for the built in data types, e.g. [[ORSetKey]], + * and you can create your own keys. + */ +abstract class Key[+T <: ReplicatedData](val id: String) extends Serializable { + + override final def equals(o: Any): Boolean = o match { + case k: Key[_] ⇒ id == k.id + case _ ⇒ false + } + + override final def hashCode: Int = id.hashCode + + override def toString(): String = id +} + diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala new file mode 100644 index 0000000000..1cdb864d60 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala @@ -0,0 +1,154 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import akka.cluster.Cluster +import akka.cluster.UniqueAddress + +object LWWMap { + private val _empty: LWWMap[Any] = new LWWMap(ORMap.empty) + def empty[A]: LWWMap[A] = _empty.asInstanceOf[LWWMap[A]] + def apply(): LWWMap[Any] = _empty + /** + * Java API + */ + def create[A](): LWWMap[A] = empty + + /** + * Extract the [[LWWMap#entries]]. + */ + def unapply[A](m: LWWMap[A]): Option[Map[String, A]] = Some(m.entries) +} + +/** + * Specialized [[ORMap]] with [[LWWRegister]] values. + * + * `LWWRegister` relies on synchronized clocks and should only be used when the choice of + * value is not important for concurrent updates occurring within the clock skew. 
+ * + * Instead of using timestamps based on `System.currentTimeMillis()` time it is possible to + * use a timestamp value based on something else, for example an increasing version number + * from a database record that is used for optimistic concurrency control. + * + * For first-write-wins semantics you can use the [[LWWRegister#reverseClock]] instead of the + * [[LWWRegister#defaultClock]] + * + * This class is immutable, i.e. "modifying" methods return a new instance. + */ +@SerialVersionUID(1L) +final class LWWMap[A] private[akka] ( + private[akka] val underlying: ORMap[LWWRegister[A]]) + extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { + import LWWRegister.{ Clock, defaultClock } + + type T = LWWMap[A] + + def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k -> r.value } + + def get(key: String): Option[A] = underlying.get(key).map(_.value) + + def contains(key: String): Boolean = underlying.contains(key) + + def isEmpty: Boolean = underlying.isEmpty + + def size: Int = underlying.size + + /** + * Adds an entry to the map + */ + def +(entry: (String, A))(implicit node: Cluster): LWWMap[A] = { + val (key, value) = entry + put(node, key, value) + } + + /** + * Adds an entry to the map + */ + def put(node: Cluster, key: String, value: A): LWWMap[A] = + put(node, key, value, defaultClock[A]) + + /** + * Adds an entry to the map. + * + * You can provide your `clock` implementation instead of using timestamps based + * on `System.currentTimeMillis()` time. The timestamp can for example be an + * increasing version number from a database record that is used for optimistic + * concurrency control. + */ + def put(node: Cluster, key: String, value: A, clock: Clock[A]): LWWMap[A] = + put(node.selfUniqueAddress, key, value, clock) + + /** + * Adds an entry to the map. + * + * You can provide your `clock` implementation instead of using timestamps based + * on `System.currentTimeMillis()` time. 
The timestamp can for example be an + * increasing version number from a database record that is used for optimistic + * concurrency control. + */ + def put(key: String, value: A)(implicit node: Cluster, clock: Clock[A] = defaultClock[A]): LWWMap[A] = + put(node, key, value, clock) + + /** + * INTERNAL API + */ + private[akka] def put(node: UniqueAddress, key: String, value: A, clock: Clock[A]): LWWMap[A] = { + val newRegister = underlying.get(key) match { + case Some(r) ⇒ r.withValue(node, value, clock) + case None ⇒ LWWRegister(node, value, clock) + } + new LWWMap(underlying.put(node, key, newRegister)) + } + + /** + * Removes an entry from the map. + * Note that if there is a conflicting update on another node the entry will + * not be removed after merge. + */ + def -(key: String)(implicit node: Cluster): LWWMap[A] = remove(node, key) + + /** + * Removes an entry from the map. + * Note that if there is a conflicting update on another node the entry will + * not be removed after merge. + */ + def remove(node: Cluster, key: String): LWWMap[A] = + remove(node.selfUniqueAddress, key) + + /** + * INTERNAL API + */ + private[akka] def remove(node: UniqueAddress, key: String): LWWMap[A] = + new LWWMap(underlying.remove(node, key)) + + override def merge(that: LWWMap[A]): LWWMap[A] = + new LWWMap(underlying.merge(that.underlying)) + + override def needPruningFrom(removedNode: UniqueAddress): Boolean = + underlying.needPruningFrom(removedNode) + + override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): LWWMap[A] = + new LWWMap(underlying.prune(removedNode, collapseInto)) + + override def pruningCleanup(removedNode: UniqueAddress): LWWMap[A] = + new LWWMap(underlying.pruningCleanup(removedNode)) + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"LWW$entries" //e.g. 
LWWMap(a -> 1, b -> 2) + + override def equals(o: Any): Boolean = o match { + case other: LWWMap[_] ⇒ underlying == other.underlying + case _ ⇒ false + } + + override def hashCode: Int = underlying.hashCode +} + +object LWWMapKey { + def create[A](id: String): Key[LWWMap[A]] = LWWMapKey(id) +} + +@SerialVersionUID(1L) +final case class LWWMapKey[A](_id: String) extends Key[LWWMap[A]](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala new file mode 100644 index 0000000000..d2ccc1ad87 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala @@ -0,0 +1,178 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import akka.cluster.Cluster +import akka.cluster.UniqueAddress +import akka.util.HashCode + +object LWWRegister { + + trait Clock[A] { + /** + * @param currentTimestamp the current `timestamp` value of the `LWWRegister` + * @param value the register value to set and associate with the returned timestamp + * @return next timestamp + */ + def apply(currentTimestamp: Long, value: A): Long + } + + private val _defaultClock: Clock[Any] = new Clock[Any] { + override def apply(currentTimestamp: Long, value: Any): Long = + math.max(System.currentTimeMillis(), currentTimestamp + 1) + } + + /** + * The default [[LWWRegister.Clock]] is using max value of `System.currentTimeMillis()` + * and `currentTimestamp + 1`. + */ + def defaultClock[A]: Clock[A] = _defaultClock.asInstanceOf[Clock[A]] + + private val _reverseClock = new Clock[Any] { + override def apply(currentTimestamp: Long, value: Any): Long = + math.min(-System.currentTimeMillis(), currentTimestamp - 1) + } + + /** + * This [[LWWRegister.Clock]] can be used for first-write-wins semantics. It is using min value of + * `-System.currentTimeMillis()` and `currentTimestamp - 1`, i.e.
it is counting backwards. + */ + def reverseClock[A]: Clock[A] = _reverseClock.asInstanceOf[Clock[A]] + + /** + * INTERNAL API + */ + private[akka] def apply[A](node: UniqueAddress, initialValue: A, clock: Clock[A]): LWWRegister[A] = + new LWWRegister(node, initialValue, clock(0L, initialValue)) + + def apply[A](initialValue: A)(implicit node: Cluster, clock: Clock[A] = defaultClock[A]): LWWRegister[A] = + apply(node.selfUniqueAddress, initialValue, clock) + + /** + * Java API + */ + def create[A](node: Cluster, initialValue: A): LWWRegister[A] = + apply(initialValue)(node) + + /** + * Java API + */ + def create[A](node: Cluster, initialValue: A, clock: Clock[A]): LWWRegister[A] = + apply(initialValue)(node, clock) + + /** + * Extract the [[LWWRegister#value]]. + */ + def unapply[A](c: LWWRegister[A]): Option[A] = Some(c.value) + +} + +/** + * Implements a 'Last Writer Wins Register' CRDT, also called a 'LWW-Register'. + * + * It is described in the paper + * A comprehensive study of Convergent and Commutative Replicated Data Types. + * + * Merge takes the register with highest timestamp. Note that this + * relies on synchronized clocks. `LWWRegister` should only be used when the choice of + * value is not important for concurrent updates occurring within the clock skew. + * + * Merge takes the register updated by the node with lowest address (`UniqueAddress` is ordered) + * if the timestamps are exactly the same. + * + * Instead of using timestamps based on `System.currentTimeMillis()` time it is possible to + * use a timestamp value based on something else, for example an increasing version number + * from a database record that is used for optimistic concurrency control. + * + * For first-write-wins semantics you can use the [[LWWRegister#reverseClock]] instead of the + * [[LWWRegister#defaultClock]] + * + * This class is immutable, i.e. "modifying" methods return a new instance.
+ */ +@SerialVersionUID(1L) +final class LWWRegister[A] private[akka] ( + private[akka] val node: UniqueAddress, + val value: A, + val timestamp: Long) + extends ReplicatedData with ReplicatedDataSerialization { + import LWWRegister.{ Clock, defaultClock } + + type T = LWWRegister[A] + + /** + * Java API + */ + def getValue(): A = value + + /** + * Change the value of the register. + * + * You can provide your `clock` implementation instead of using timestamps based + * on `System.currentTimeMillis()` time. The timestamp can for example be an + * increasing version number from a database record that is used for optimistic + * concurrency control. + */ + def withValue(value: A)(implicit node: Cluster, clock: Clock[A] = defaultClock[A]): LWWRegister[A] = + withValue(node, value) + + /** + * Change the value of the register. + */ + def withValue(node: Cluster, value: A): LWWRegister[A] = + withValue(node, value, defaultClock[A]) + + /** + * Change the value of the register. + * + * You can provide your `clock` implementation instead of using timestamps based + * on `System.currentTimeMillis()` time. The timestamp can for example be an + * increasing version number from a database record that is used for optimistic + * concurrency control. + */ + def withValue(node: Cluster, value: A, clock: Clock[A]): LWWRegister[A] = + withValue(node.selfUniqueAddress, value, clock) + + /** + * The current `value` was set by this node. 
+ */ + def updatedBy: UniqueAddress = node + + /** + * INTERNAL API + */ + private[akka] def withValue(node: UniqueAddress, value: A, clock: Clock[A]): LWWRegister[A] = + new LWWRegister(node, value, clock(timestamp, value)) + + override def merge(that: LWWRegister[A]): LWWRegister[A] = + if (that.timestamp > this.timestamp) that + else if (that.timestamp < this.timestamp) this + else if (that.node < this.node) that + else this + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"LWWRegister($value)" + + override def equals(o: Any): Boolean = o match { + case other: LWWRegister[_] ⇒ + timestamp == other.timestamp && value == other.value && node == other.node + case _ ⇒ false + } + + override def hashCode: Int = { + var result = HashCode.SEED + result = HashCode.hash(result, timestamp) + result = HashCode.hash(result, node) + result = HashCode.hash(result, value) + result + } + +} + +object LWWRegisterKey { + def create[A](id: String): Key[LWWRegister[A]] = LWWRegisterKey(id) +} + +@SerialVersionUID(1L) +final case class LWWRegisterKey[A](_id: String) extends Key[LWWRegister[A]](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala new file mode 100644 index 0000000000..fdc52f4da1 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala @@ -0,0 +1,231 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata + +import akka.cluster.Cluster +import akka.cluster.UniqueAddress +import akka.util.HashCode +import akka.japi.function.{ Function ⇒ JFunction } + +object ORMap { + private val _empty: ORMap[ReplicatedData] = new ORMap(ORSet.empty, Map.empty) + def empty[A <: ReplicatedData]: ORMap[A] = _empty.asInstanceOf[ORMap[A]] + def apply(): ORMap[ReplicatedData] = _empty + /** + * Java API + */ + def create[A <: ReplicatedData](): ORMap[A] = empty[A] + + /** + * Extract the [[ORMap#entries]]. + */ + def unapply[A <: ReplicatedData](m: ORMap[A]): Option[Map[String, A]] = Some(m.entries) + +} + +/** + * Implements a 'Observed Remove Map' CRDT, also called a 'OR-Map'. + * + * It has similar semantics as an [[ORSet]], but in case of concurrent updates + * the values are merged, and must therefore be [[ReplicatedData]] types themselves. + * + * This class is immutable, i.e. "modifying" methods return a new instance. + */ +@SerialVersionUID(1L) +final class ORMap[A <: ReplicatedData] private[akka] ( + private[akka] val keys: ORSet[String], + private[akka] val values: Map[String, A]) + extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { + + type T = ORMap[A] + + /** + * Scala API: All entries of the map. + */ + def entries: Map[String, A] = values + + /** + * Java API: All entries of the map. + */ + def getEntries(): java.util.Map[String, A] = { + import scala.collection.JavaConverters._ + entries.asJava + } + + def get(key: String): Option[A] = values.get(key) + + /** + * Scala API: Get the value associated with the key if there is one, + * else return the given default. 
+ */ + def getOrElse(key: String, default: ⇒ A): A = values.getOrElse(key, default) + + def contains(key: String): Boolean = values.contains(key) + + def isEmpty: Boolean = values.isEmpty + + def size: Int = values.size + + /** + * Adds an entry to the map + * @see [[#put]] + */ + def +(entry: (String, A))(implicit node: Cluster): ORMap[A] = { + val (key, value) = entry + put(node, key, value) + } + + /** + * Adds an entry to the map. + * Note that the new `value` will be merged with existing values + * on other nodes and the outcome depends on what `ReplicatedData` + * type that is used. + * + * Consider using [[#updated]] instead of `put` if you want modify + * existing entry. + * + * `IllegalArgumentException` is thrown if you try to replace an existing `ORSet` + * value, because important history can be lost when replacing the `ORSet` and + * undesired effects of merging will occur. + */ + def put(node: Cluster, key: String, value: A): ORMap[A] = put(node.selfUniqueAddress, key, value) + + /** + * INTERNAL API + */ + private[akka] def put(node: UniqueAddress, key: String, value: A): ORMap[A] = + if (value.isInstanceOf[ORSet[_]] && values.contains(key)) + throw new IllegalArgumentException( + "`ORMap.put` must not be used to replace an existing `ORSet` " + + "value, because important history can be lost when replacing the `ORSet` and " + + "undesired effects of merging will occur. Use `ORMap.updated` instead.") + else + new ORMap(keys.add(node, key), values.updated(key, value)) + + /** + * Scala API: Replace a value by applying the `modify` function on the existing value. + * + * If there is no current value for the `key` the `initial` value will be + * passed to the `modify` function. + */ + def updated(node: Cluster, key: String, initial: A)(modify: A ⇒ A): ORMap[A] = + updated(node.selfUniqueAddress, key, initial)(modify) + + /** + * Java API: Replace a value by applying the `modify` function on the existing value. 
+ * + * If there is no current value for the `key` the `initial` value will be + * passed to the `modify` function. + */ + def updated(node: Cluster, key: String, initial: A, modify: java.util.function.Function[A, A]): ORMap[A] = + updated(node, key, initial)(value ⇒ modify.apply(value)) + + /** + * INTERNAL API + */ + private[akka] def updated(node: UniqueAddress, key: String, initial: A)(modify: A ⇒ A): ORMap[A] = { + val newValue = values.get(key) match { + case Some(old) ⇒ modify(old) + case _ ⇒ modify(initial) + } + new ORMap(keys.add(node, key), values.updated(key, newValue)) + } + + /** + * Removes an entry from the map. + * Note that if there is a conflicting update on another node the entry will + * not be removed after merge. + */ + def -(key: String)(implicit node: Cluster): ORMap[A] = remove(node, key) + + /** + * Removes an entry from the map. + * Note that if there is a conflicting update on another node the entry will + * not be removed after merge. + */ + def remove(node: Cluster, key: String): ORMap[A] = remove(node.selfUniqueAddress, key) + + /** + * INTERNAL API + */ + private[akka] def remove(node: UniqueAddress, key: String): ORMap[A] = { + new ORMap(keys.remove(node, key), values - key) + } + + override def merge(that: ORMap[A]): ORMap[A] = { + val mergedKeys = keys.merge(that.keys) + var mergedValues = Map.empty[String, A] + mergedKeys.elementsMap.keysIterator.foreach { key ⇒ + (this.values.get(key), that.values.get(key)) match { + case (Some(thisValue), Some(thatValue)) ⇒ + if (thisValue.getClass != thatValue.getClass) { + val errMsg = s"Wrong type for merging [$key] in [${getClass.getName}], existing type " + + s"[${thisValue.getClass.getName}], got [${thatValue.getClass.getName}]" + throw new IllegalArgumentException(errMsg) + } + // TODO can we get rid of these (safe) casts? 
+ val mergedValue = thisValue.merge(thatValue.asInstanceOf[thisValue.T]).asInstanceOf[A] + mergedValues = mergedValues.updated(key, mergedValue) + case (Some(thisValue), None) ⇒ + mergedValues = mergedValues.updated(key, thisValue) + case (None, Some(thatValue)) ⇒ + mergedValues = mergedValues.updated(key, thatValue) + case (None, None) ⇒ throw new IllegalStateException(s"missing value for $key") + } + } + + new ORMap(mergedKeys, mergedValues) + } + + override def needPruningFrom(removedNode: UniqueAddress): Boolean = { + keys.needPruningFrom(removedNode) || values.exists { + case (_, data: RemovedNodePruning) ⇒ data.needPruningFrom(removedNode) + case _ ⇒ false + } + } + + override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): ORMap[A] = { + val prunedKeys = keys.prune(removedNode, collapseInto) + val prunedValues = values.foldLeft(values) { + case (acc, (key, data: RemovedNodePruning)) if data.needPruningFrom(removedNode) ⇒ + acc.updated(key, data.prune(removedNode, collapseInto).asInstanceOf[A]) + case (acc, _) ⇒ acc + } + new ORMap(prunedKeys, prunedValues) + } + + override def pruningCleanup(removedNode: UniqueAddress): ORMap[A] = { + val pruningCleanupedKeys = keys.pruningCleanup(removedNode) + val pruningCleanupedValues = values.foldLeft(values) { + case (acc, (key, data: RemovedNodePruning)) if data.needPruningFrom(removedNode) ⇒ + acc.updated(key, data.pruningCleanup(removedNode).asInstanceOf[A]) + case (acc, _) ⇒ acc + } + new ORMap(pruningCleanupedKeys, pruningCleanupedValues) + } + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"OR$entries" + + override def equals(o: Any): Boolean = o match { + case other: ORMap[_] ⇒ keys == other.keys && values == other.values + case _ ⇒ false + } + + override def hashCode: Int = { + var result = HashCode.SEED + result = HashCode.hash(result, keys) + result = HashCode.hash(result, values) + result + } + +} + +object ORMapKey { + 
def create[A <: ReplicatedData](id: String): Key[ORMap[A]] = ORMapKey(id) +} + +@SerialVersionUID(1L) +final case class ORMapKey[A <: ReplicatedData](_id: String) extends Key[ORMap[A]](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala new file mode 100644 index 0000000000..ad191c58fa --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala @@ -0,0 +1,299 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import scala.annotation.tailrec +import scala.collection.immutable.TreeMap + +import akka.cluster.Cluster +import akka.cluster.UniqueAddress +import akka.util.HashCode + +// TODO this class can be optimized, but I wanted to start with correct functionality and comparability with riak_dt_orswot + +object ORSet { + private val _empty: ORSet[Any] = new ORSet(Map.empty, VersionVector.empty) + def empty[A]: ORSet[A] = _empty.asInstanceOf[ORSet[A]] + def apply(): ORSet[Any] = _empty + /** + * Java API + */ + def create[A](): ORSet[A] = empty[A] + + /** + * Extract the [[ORSet#elements]]. + */ + def unapply[A](s: ORSet[A]): Option[Set[A]] = Some(s.elements) + + /** + * Extract the [[ORSet#elements]] of an `ORSet`. + */ + def unapply(a: ReplicatedData): Option[Set[Any]] = a match { + case s: ORSet[Any] @unchecked ⇒ Some(s.elements) + case _ ⇒ None + } + + /** + * INTERNAL API + */ + private[akka]type Dot = VersionVector + + /** + * INTERNAL API + * Subtract the `vvector` from the `dot`. + * What this means is that any (node, version) pair in + * `dot` that is <= an entry in `vvector` is removed from `dot`. 
+ * Example [{a, 3}, {b, 2}, {d, 14}, {g, 22}] - + * [{a, 4}, {b, 1}, {c, 1}, {d, 14}, {e, 5}, {f, 2}] = + * [{b, 2}, {g, 22}] + */ + private[akka] def subtractDots(dot: Dot, vvector: VersionVector): Dot = { + + @tailrec def dropDots(remaining: List[(UniqueAddress, Long)], acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] = + remaining match { + case Nil ⇒ acc + case (d @ (node, v1)) :: rest ⇒ + vvector.versions.get(node) match { + case Some(v2) if v2 >= v1 ⇒ + // dot is dominated by version vector, drop it + dropDots(rest, acc) + case _ ⇒ + dropDots(rest, d :: acc) + } + } + + val newDots = dropDots(dot.versions.toList, Nil) + new VersionVector(versions = TreeMap.empty[UniqueAddress, Long] ++ newDots) + } + + /** + * INTERNAL API + * @see [[ORSet#merge]] + */ + private[akka] def mergeCommonKeys[A](commonKeys: Set[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = { + commonKeys.foldLeft(Map.empty[A, ORSet.Dot]) { + case (acc, k) ⇒ + val lhsDots = lhs.elementsMap(k).versions + val rhsDots = rhs.elementsMap(k).versions + val commonDots = lhsDots.filter { + case (thisDotNode, v) ⇒ rhsDots.get(thisDotNode).exists(_ == v) + } + val commonDotsKeys = commonDots.keys + val lhsUniqueDots = lhsDots -- commonDotsKeys + val rhsUniqueDots = rhsDots -- commonDotsKeys + val lhsKeep = ORSet.subtractDots(new VersionVector(lhsUniqueDots), rhs.vvector) + val rhsKeep = ORSet.subtractDots(new VersionVector(rhsUniqueDots), lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(new VersionVector(versions = commonDots)) + // Perfectly possible that an item in both sets should be dropped + if (merged.versions.isEmpty) acc + else acc.updated(k, merged) + } + } + + /** + * INTERNAL API + * @see [[ORSet#merge]] + */ + private[akka] def mergeDisjointKeys[A](keys: Set[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector, + accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = { + keys.foldLeft(accumulator) { + case (acc, k) ⇒ + val dots = elementsMap(k) + if (vvector > 
dots || vvector == dots)
+          acc
+        else {
+          // Optimise the set of stored dots to include only those unseen
+          val newDots = subtractDots(dots, vvector)
+          acc.updated(k, newDots)
+        }
+    }
+  }
+}
+
+/**
+ * Implements an 'Observed Remove Set' CRDT, also called an 'OR-Set'.
+ * Elements can be added and removed any number of times. Concurrent add wins
+ * over remove.
+ *
+ * It is not implemented as in the paper
+ * A comprehensive study of Convergent and Commutative Replicated Data Types.
+ * This is more space efficient and doesn't accumulate garbage for removed elements.
+ * It is described in the paper
+ * An optimized conflict-free replicated set
+ * The implementation is inspired by the Riak DT
+ * riak_dt_orswot.
+ *
+ * The ORSet has a version vector that is incremented when an element is added to
+ * the set. The `node -> count` pair for that increment is stored against the
+ * element as its "birth dot". Every time the element is re-added to the set,
+ * its "birth dot" is updated to that of the `node -> count` version vector entry
+ * resulting from the add. When an element is removed, we simply drop it, no tombstones.
+ *
+ * When an element exists in replica A and not replica B, is it because A added
+ * it and B has not yet seen that, or that B removed it and A has not yet seen that?
+ * In this implementation we compare the `dot` of the present element to the version vector
+ * in the Set it is absent from. If the element dot is not "seen" by the Set version vector,
+ * that means the other set has yet to see this add, and the item is in the merged
+ * Set. If the Set version vector dominates the dot, that means the other Set has removed this
+ * element already, and the item is not in the merged Set.
+ *
+ * This class is immutable, i.e. "modifying" methods return a new instance.
+ */ +@SerialVersionUID(1L) +final class ORSet[A] private[akka] ( + private[akka] val elementsMap: Map[A, ORSet.Dot], + private[akka] val vvector: VersionVector) + extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { + + type T = ORSet[A] + + /** + * Scala API + */ + def elements: Set[A] = elementsMap.keySet + + /** + * Java API + */ + def getElements(): java.util.Set[A] = { + import scala.collection.JavaConverters._ + elements.asJava + } + + def contains(a: A): Boolean = elementsMap.contains(a) + + def isEmpty: Boolean = elementsMap.isEmpty + + def size: Int = elementsMap.size + + /** + * Adds an element to the set + */ + def +(element: A)(implicit node: Cluster): ORSet[A] = add(node, element) + + /** + * Adds an element to the set + */ + def add(node: Cluster, element: A): ORSet[A] = add(node.selfUniqueAddress, element) + + /** + * INTERNAL API + */ + private[akka] def add(node: UniqueAddress, element: A): ORSet[A] = { + val newVvector = vvector + node + val newDot = new VersionVector(versions = TreeMap(node -> newVvector.versions(node))) + new ORSet(elementsMap = elementsMap.updated(element, newDot), vvector = newVvector) + } + + /** + * Removes an element from the set. + */ + def -(element: A)(implicit node: Cluster): ORSet[A] = remove(node, element) + + /** + * Removes an element from the set. + */ + def remove(node: Cluster, element: A): ORSet[A] = remove(node.selfUniqueAddress, element) + + /** + * INTERNAL API + */ + private[akka] def remove(node: UniqueAddress, element: A): ORSet[A] = + copy(elementsMap = elementsMap - element) + + /** + * Removes all elements from the set, but keeps the history. + * This has the same result as using [[#remove]] for each + * element, but it is more efficient. 
+ */ + def clear(node: Cluster): ORSet[A] = clear(node.selfUniqueAddress) + + /** + * INTERNAL API + */ + private[akka] def clear(node: UniqueAddress): ORSet[A] = copy(elementsMap = Map.empty) + + /** + * When element is in this Set but not in that Set: + * Compare the "birth dot" of the present element to the version vector in the Set it is absent from. + * If the element dot is not "seen" by other Set version vector, that means the other set has yet to + * see this add, and the element is to be in the merged Set. + * If the other Set version vector dominates the dot, that means the other Set has removed + * the element already, and the element is not to be in the merged Set. + * + * When element in both this Set and in that Set: + * Some dots may still need to be shed. If this Set has dots that the other Set does not have, + * and the other Set version vector dominates those dots, then we need to drop those dots. + * Keep only common dots, and dots that are not dominated by the other sides version vector + */ + override def merge(that: ORSet[A]): ORSet[A] = { + val thisKeys = elementsMap.keySet + val thatKeys = that.elementsMap.keySet + val commonKeys = thisKeys.intersect(thatKeys) + val thisUniqueKeys = thisKeys -- commonKeys + val thatUniqueKeys = thatKeys -- commonKeys + + val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that) + val entries0 = ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00) + val entries = ORSet.mergeDisjointKeys(thatUniqueKeys, that.elementsMap, this.vvector, entries0) + val mergedVvector = this.vvector.merge(that.vvector) + + new ORSet(entries, mergedVvector) + } + + override def needPruningFrom(removedNode: UniqueAddress): Boolean = + vvector.needPruningFrom(removedNode) + + override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): ORSet[A] = { + val pruned = elementsMap.foldLeft(Map.empty[A, ORSet.Dot]) { + case (acc, (elem, dot)) ⇒ + if (dot.needPruningFrom(removedNode)) 
acc.updated(elem, dot.prune(removedNode, collapseInto)) + else acc + } + if (pruned.isEmpty) + copy(vvector = vvector.prune(removedNode, collapseInto)) + else { + // re-add elements that were pruned, to bump dots to right vvector + val newSet = new ORSet(elementsMap = elementsMap ++ pruned, vvector = vvector.prune(removedNode, collapseInto)) + pruned.keys.foldLeft(newSet) { + case (s, elem) ⇒ s.add(collapseInto, elem) + } + } + } + + override def pruningCleanup(removedNode: UniqueAddress): ORSet[A] = { + val updated = elementsMap.foldLeft(elementsMap) { + case (acc, (elem, dot)) ⇒ + if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.pruningCleanup(removedNode)) + else acc + } + new ORSet(updated, vvector.pruningCleanup(removedNode)) + } + + private def copy(elementsMap: Map[A, ORSet.Dot] = this.elementsMap, vvector: VersionVector = this.vvector): ORSet[A] = + new ORSet(elementsMap, vvector) + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"OR$elements" + + override def equals(o: Any): Boolean = o match { + case other: ORSet[_] ⇒ vvector == other.vvector && elementsMap == other.elementsMap + case _ ⇒ false + } + + override def hashCode: Int = { + var result = HashCode.SEED + result = HashCode.hash(result, elementsMap) + result = HashCode.hash(result, vvector) + result + } +} + +object ORSetKey { + def create[A](id: String): Key[ORSet[A]] = ORSetKey(id) +} + +@SerialVersionUID(1L) +final case class ORSetKey[A](_id: String) extends Key[ORSet[A]](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala new file mode 100644 index 0000000000..8be1943639 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala @@ -0,0 +1,134 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */
+package akka.cluster.ddata
+
+import akka.cluster.Cluster
+import akka.cluster.UniqueAddress
+import akka.util.HashCode
+import java.math.BigInteger
+
+object PNCounter {
+  val empty: PNCounter = new PNCounter(GCounter.empty, GCounter.empty)
+  def apply(): PNCounter = empty
+  /**
+   * Java API
+   */
+  def create(): PNCounter = empty
+
+  /**
+   * Extract the [[PNCounter#value]].
+   */
+  def unapply(c: PNCounter): Option[BigInt] = Some(c.value)
+}
+
+/**
+ * Implements an 'Increment/Decrement Counter' CRDT, also called a 'PN-Counter'.
+ *
+ * It is described in the paper
+ * A comprehensive study of Convergent and Commutative Replicated Data Types.
+ *
+ * PN-Counters allow the counter to be both incremented and decremented by tracking the
+ * increments (P) separately from the decrements (N). Both P and N are represented
+ * as two internal [[GCounter]]s. Merge is handled by merging the internal P and N
+ * counters. The value of the counter is the value of the P counter minus
+ * the value of the N counter.
+ *
+ * This class is immutable, i.e. "modifying" methods return a new instance.
+ */
+@SerialVersionUID(1L)
+final class PNCounter private[akka] (
+  private[akka] val increments: GCounter, private[akka] val decrements: GCounter)
+  extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning {
+
+  type T = PNCounter
+
+  /**
+   * Scala API: Current total value of the counter.
+   */
+  def value: BigInt = increments.value - decrements.value
+
+  /**
+   * Java API: Current total value of the counter.
+   */
+  def getValue: BigInteger = value.bigInteger
+
+  /**
+   * Increment the counter with the delta specified.
+   * If the delta is negative then it will decrement instead of increment.
+   */
+  def +(delta: Long)(implicit node: Cluster): PNCounter = increment(node, delta)
+
+  /**
+   * Increment the counter with the delta specified.
+   * If the delta is negative then it will decrement instead of increment.
+ */ + def increment(node: Cluster, delta: Long = 1): PNCounter = + increment(node.selfUniqueAddress, delta) + + /** + * Decrement the counter with the delta specified. + * If the delta is negative then it will increment instead of decrement. + */ + def -(delta: Long)(implicit node: Cluster): PNCounter = decrement(node, delta) + + /** + * Decrement the counter with the delta specified. + * If the delta is negative then it will increment instead of decrement. + */ + def decrement(node: Cluster, delta: Long = 1): PNCounter = + decrement(node.selfUniqueAddress, delta) + + private[akka] def increment(key: UniqueAddress, delta: Long): PNCounter = change(key, delta) + private[akka] def increment(key: UniqueAddress): PNCounter = increment(key, 1) + private[akka] def decrement(key: UniqueAddress, delta: Long): PNCounter = change(key, -delta) + private[akka] def decrement(key: UniqueAddress): PNCounter = decrement(key, 1) + + private[akka] def change(key: UniqueAddress, delta: Long): PNCounter = + if (delta > 0) copy(increments = increments.increment(key, delta)) + else if (delta < 0) copy(decrements = decrements.increment(key, -delta)) + else this + + override def merge(that: PNCounter): PNCounter = + copy(increments = that.increments.merge(this.increments), + decrements = that.decrements.merge(this.decrements)) + + override def needPruningFrom(removedNode: UniqueAddress): Boolean = + increments.needPruningFrom(removedNode) || decrements.needPruningFrom(removedNode) + + override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): PNCounter = + copy(increments = increments.prune(removedNode, collapseInto), + decrements = decrements.prune(removedNode, collapseInto)) + + override def pruningCleanup(removedNode: UniqueAddress): PNCounter = + copy(increments = increments.pruningCleanup(removedNode), + decrements = decrements.pruningCleanup(removedNode)) + + private def copy(increments: GCounter = this.increments, decrements: GCounter = this.decrements): 
PNCounter = + new PNCounter(increments, decrements) + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"PNCounter($value)" + + override def equals(o: Any): Boolean = o match { + case other: PNCounter ⇒ + increments == other.increments && decrements == other.decrements + case _ ⇒ false + } + + override def hashCode: Int = { + var result = HashCode.SEED + result = HashCode.hash(result, increments) + result = HashCode.hash(result, decrements) + result + } + +} + +object PNCounterKey { + def create[A](id: String): Key[PNCounter] = PNCounterKey(id) +} + +@SerialVersionUID(1L) +final case class PNCounterKey(_id: String) extends Key[PNCounter](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala new file mode 100644 index 0000000000..8680e170ad --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala @@ -0,0 +1,149 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import akka.cluster.Cluster +import akka.cluster.UniqueAddress +import java.math.BigInteger + +object PNCounterMap { + val empty: PNCounterMap = new PNCounterMap(ORMap.empty) + def apply(): PNCounterMap = empty + /** + * Java API + */ + def create(): PNCounterMap = empty + + /** + * Extract the [[PNCounterMap#entries]]. + */ + def unapply(m: PNCounterMap): Option[Map[String, BigInt]] = Some(m.entries) +} + +/** + * Map of named counters. Specialized [[ORMap]] with [[PNCounter]] values. + * + * This class is immutable, i.e. "modifying" methods return a new instance. 
+ */ +@SerialVersionUID(1L) +final class PNCounterMap private[akka] ( + private[akka] val underlying: ORMap[PNCounter]) + extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { + + type T = PNCounterMap + + /** Scala API */ + def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k -> c.value } + + /** Java API */ + def getEntries: Map[String, BigInteger] = underlying.entries.map { case (k, c) ⇒ k -> c.value.bigInteger } + + /** + * Scala API: The count for a key + */ + def get(key: String): Option[BigInt] = underlying.get(key).map(_.value) + + /** + * Java API: The count for a key, or `null` if it doesn't exist + */ + def getValue(key: String): BigInteger = underlying.get(key).map(_.value.bigInteger).orNull + + def contains(key: String): Boolean = underlying.contains(key) + + def isEmpty: Boolean = underlying.isEmpty + + def size: Int = underlying.size + + /** + * Increment the counter with the delta specified. + * If the delta is negative then it will decrement instead of increment. + */ + def increment(key: String, delta: Long = 1)(implicit node: Cluster): PNCounterMap = + increment(node, key, delta) + + /** + * Increment the counter with the delta specified. + * If the delta is negative then it will decrement instead of increment. + */ + def increment(node: Cluster, key: String, delta: Long): PNCounterMap = + increment(node.selfUniqueAddress, key, delta) + + /** + * INTERNAL API + */ + private[akka] def increment(node: UniqueAddress, key: String, delta: Long): PNCounterMap = + new PNCounterMap(underlying.updated(node, key, PNCounter())(_.increment(node, delta))) + + /** + * Decrement the counter with the delta specified. + * If the delta is negative then it will increment instead of decrement. + */ + def decrement(key: String, delta: Long = 1)(implicit node: Cluster): PNCounterMap = + decrement(node, key, delta) + + /** + * Decrement the counter with the delta specified. 
+ * If the delta is negative then it will increment instead of decrement. + */ + def decrement(node: Cluster, key: String, delta: Long): PNCounterMap = + decrement(node.selfUniqueAddress, key, delta) + + /** + * INTERNAL API + */ + private[akka] def decrement(node: UniqueAddress, key: String, delta: Long): PNCounterMap = { + new PNCounterMap(underlying.updated(node, key, PNCounter())(_.decrement(node, delta))) + } + + /** + * Removes an entry from the map. + * Note that if there is a conflicting update on another node the entry will + * not be removed after merge. + */ + def -(key: String)(implicit node: Cluster): PNCounterMap = remove(node, key) + + /** + * Removes an entry from the map. + * Note that if there is a conflicting update on another node the entry will + * not be removed after merge. + */ + def remove(node: Cluster, key: String): PNCounterMap = + remove(node.selfUniqueAddress, key) + + /** + * INTERNAL API + */ + private[akka] def remove(node: UniqueAddress, key: String): PNCounterMap = + new PNCounterMap(underlying.remove(node, key)) + + override def merge(that: PNCounterMap): PNCounterMap = + new PNCounterMap(underlying.merge(that.underlying)) + + override def needPruningFrom(removedNode: UniqueAddress): Boolean = + underlying.needPruningFrom(removedNode) + + override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): PNCounterMap = + new PNCounterMap(underlying.prune(removedNode, collapseInto)) + + override def pruningCleanup(removedNode: UniqueAddress): PNCounterMap = + new PNCounterMap(underlying.pruningCleanup(removedNode)) + + // this class cannot be a `case class` because we need different `unapply` + + override def toString: String = s"PNCounter$entries" + + override def equals(o: Any): Boolean = o match { + case other: PNCounterMap ⇒ underlying == other.underlying + case _ ⇒ false + } + + override def hashCode: Int = underlying.hashCode +} + +object PNCounterMapKey { + def create[A](id: String): Key[PNCounterMap] = 
PNCounterMapKey(id) +} + +@SerialVersionUID(1L) +final case class PNCounterMapKey(_id: String) extends Key[PNCounterMap](_id) with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala new file mode 100644 index 0000000000..121b1de5cc --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala @@ -0,0 +1,45 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import akka.actor.Address +import akka.cluster.Member +import akka.cluster.UniqueAddress + +/** + * INTERNAL API + */ +private[akka] object PruningState { + sealed trait PruningPhase + final case class PruningInitialized(seen: Set[Address]) extends PruningPhase + case object PruningPerformed extends PruningPhase +} + +/** + * INTERNAL API + */ +private[akka] final case class PruningState(owner: UniqueAddress, phase: PruningState.PruningPhase) { + import PruningState._ + + def merge(that: PruningState): PruningState = + (this.phase, that.phase) match { + case (PruningPerformed, _) ⇒ this + case (_, PruningPerformed) ⇒ that + case (PruningInitialized(thisSeen), PruningInitialized(thatSeen)) ⇒ + if (this.owner == that.owner) + copy(phase = PruningInitialized(thisSeen ++ thatSeen)) + else if (Member.addressOrdering.compare(this.owner.address, that.owner.address) > 0) + that + else + this + } + + def addSeen(node: Address): PruningState = phase match { + case PruningInitialized(seen) ⇒ + if (seen(node) || owner.address == node) this + else copy(phase = PruningInitialized(seen + node)) + case _ ⇒ this + } +} + diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala new file mode 100644 index 0000000000..9f344ad9fb --- /dev/null +++ 
b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala
@@ -0,0 +1,82 @@
+/**
+ * Copyright (C) 2009-2015 Typesafe Inc.
+ */
+package akka.cluster.ddata
+
+import akka.cluster.UniqueAddress
+
+/**
+ * Interface for implementing a state based convergent
+ * replicated data type (CvRDT).
+ *
+ * ReplicatedData types must be serializable with an Akka
+ * Serializer. It is highly recommended to implement a serializer with
+ * Protobuf or similar. The built in data types are marked with
+ * [[ReplicatedDataSerialization]] and serialized with
+ * [[akka.cluster.ddata.protobuf.ReplicatedDataSerializer]].
+ *
+ * Serialization of the data types is used in remote messages and also
+ * for creating message digests (SHA-1) to detect changes. Therefore it is
+ * important that the serialization produce the same bytes for the same content.
+ * For example sets and maps should be sorted deterministically in the serialization.
+ *
+ * ReplicatedData types should be immutable, i.e. "modifying" methods should return
+ * a new instance.
+ */
+trait ReplicatedData {
+  type T <: ReplicatedData
+
+  /**
+   * Monotonic merge function.
+   */
+  def merge(that: T): T
+
+}
+
+/**
+ * Java API: Interface for implementing a [[ReplicatedData]] in
+ * Java.
+ */
+abstract class AbstractReplicatedData extends ReplicatedData {
+  // it is not possible to use a more strict type, because it is erased somehow, and
+  // the implementation is anyway required to implement
+  // merge(that: ReplicatedData): ReplicatedData
+  type T = AbstractReplicatedData
+
+}
+
+/**
+ * [[ReplicatedData]] that has support for pruning of data
+ * belonging to a specific node may implement this interface.
+ * When a node is removed from the cluster these methods will be
+ * used by the [[Replicator]] to collapse data from the removed node
+ * into some other node in the cluster.
+ */ +trait RemovedNodePruning { this: ReplicatedData ⇒ + + /** + * Does it have any state changes from a specific node, + * which has been removed from the cluster. + */ + def needPruningFrom(removedNode: UniqueAddress): Boolean + + /** + * When the `removed` node has been removed from the cluster the state + * changes from that node will be pruned by collapsing the data entries + * to another node. + */ + def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): T + + /** + * Remove data entries from a node that has been removed from the cluster + * and already been pruned. + */ + def pruningCleanup(removedNode: UniqueAddress): T +} + +/** + * Marker trait for `ReplicatedData` serialized by + * [[akka.cluster.ddata.protobuf.ReplicatedDataSerializer]]. + */ +trait ReplicatedDataSerialization extends Serializable + diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala new file mode 100644 index 0000000000..3859cab017 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -0,0 +1,1467 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata + +import java.security.MessageDigest +import scala.annotation.tailrec +import scala.collection.immutable +import scala.collection.immutable.Queue +import scala.collection.mutable +import scala.concurrent.duration._ +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.forkjoin.ThreadLocalRandom +import scala.util.Failure +import scala.util.Success +import scala.util.Try +import scala.util.control.NoStackTrace +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.ActorSelection +import akka.actor.ActorSystem +import akka.actor.Address +import akka.actor.NoSerializationVerificationNeeded +import akka.actor.Deploy +import akka.actor.Props +import akka.actor.ReceiveTimeout +import akka.actor.Terminated +import akka.cluster.Cluster +import akka.cluster.ClusterEvent._ +import akka.cluster.ClusterEvent.InitialStateAsEvents +import akka.cluster.Member +import akka.cluster.UniqueAddress +import akka.serialization.SerializationExtension +import akka.util.ByteString +import com.typesafe.config.Config +import java.util.function.{ Function ⇒ JFunction } +import akka.dispatch.Dispatchers +import akka.actor.DeadLetterSuppression +import akka.cluster.ddata.Key.KeyR +import java.util.Optional + +object ReplicatorSettings { + + /** + * Create settings from the default configuration + * `akka.cluster.distributed-data`. + */ + def apply(system: ActorSystem): ReplicatorSettings = + apply(system.settings.config.getConfig("akka.cluster.distributed-data")) + + /** + * Create settings from a configuration with the same layout as + * the default configuration `akka.cluster.distributed-data`. 
+ */ + def apply(config: Config): ReplicatorSettings = { + val dispatcher = config.getString("use-dispatcher") match { + case "" ⇒ Dispatchers.DefaultDispatcherId + case id ⇒ id + } + new ReplicatorSettings( + role = roleOption(config.getString("role")), + gossipInterval = config.getDuration("gossip-interval", MILLISECONDS).millis, + notifySubscribersInterval = config.getDuration("notify-subscribers-interval", MILLISECONDS).millis, + maxDeltaElements = config.getInt("max-delta-elements"), + dispatcher = dispatcher, + pruningInterval = config.getDuration("pruning-interval", MILLISECONDS).millis, + maxPruningDissemination = config.getDuration("max-pruning-dissemination", MILLISECONDS).millis) + } + + /** + * INTERNAL API + */ + private[akka] def roleOption(role: String): Option[String] = + if (role == "") None else Option(role) +} + +/** + * @param role Replicas are running on members tagged with this role. + * All members are used if undefined. + * @param gossipInterval How often the Replicator should send out gossip information. + * @param notifySubscribersInterval How often the subscribers will be notified + * of changes, if any. + * @param maxDeltaElements Maximum number of entries to transfer in one + * gossip message when synchronizing the replicas. Next chunk will be + * transferred in next round of gossip. + * @param dispatcher Id of the dispatcher to use for Replicator actors. If not + * specified (`""`) the default dispatcher is used. + * @param pruningInterval How often the Replicator checks for pruning of + * data associated with removed cluster nodes. + * @param maxPruningDissemination How long time it takes (worst case) to spread + * the data to all other replica nodes. This is used when initiating and + * completing the pruning process of data associated with removed cluster nodes. + * The time measurement is stopped when any replica is unreachable, so it should + * be configured to worst case in a healthy cluster. 
+ */ +final class ReplicatorSettings( + val role: Option[String], + val gossipInterval: FiniteDuration, + val notifySubscribersInterval: FiniteDuration, + val maxDeltaElements: Int, + val dispatcher: String, + val pruningInterval: FiniteDuration, + val maxPruningDissemination: FiniteDuration) { + + def withRole(role: String): ReplicatorSettings = copy(role = ReplicatorSettings.roleOption(role)) + + def withRole(role: Option[String]): ReplicatorSettings = copy(role = role) + + def withGossipInterval(gossipInterval: FiniteDuration): ReplicatorSettings = + copy(gossipInterval = gossipInterval) + + def withNotifySubscribersInterval(notifySubscribersInterval: FiniteDuration): ReplicatorSettings = + copy(notifySubscribersInterval = notifySubscribersInterval) + + def withMaxDeltaElements(maxDeltaElements: Int): ReplicatorSettings = + copy(maxDeltaElements = maxDeltaElements) + + def withDispatcher(dispatcher: String): ReplicatorSettings = { + val d = dispatcher match { + case "" ⇒ Dispatchers.DefaultDispatcherId + case id ⇒ id + } + copy(dispatcher = d) + } + + def withPruning(pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration): ReplicatorSettings = + copy(pruningInterval = pruningInterval, maxPruningDissemination = maxPruningDissemination) + + private def copy( + role: Option[String] = role, + gossipInterval: FiniteDuration = gossipInterval, + notifySubscribersInterval: FiniteDuration = notifySubscribersInterval, + maxDeltaElements: Int = maxDeltaElements, + dispatcher: String = dispatcher, + pruningInterval: FiniteDuration = pruningInterval, + maxPruningDissemination: FiniteDuration = maxPruningDissemination): ReplicatorSettings = + new ReplicatorSettings(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, + pruningInterval, maxPruningDissemination) +} + +object Replicator { + + /** + * Factory method for the [[akka.actor.Props]] of the [[Replicator]] actor. 
+ */ + def props(settings: ReplicatorSettings): Props = + Props(new Replicator(settings)).withDeploy(Deploy.local).withDispatcher(settings.dispatcher) + + sealed trait ReadConsistency { + def timeout: FiniteDuration + } + case object ReadLocal extends ReadConsistency { + override def timeout: FiniteDuration = Duration.Zero + } + final case class ReadFrom(n: Int, timeout: FiniteDuration) extends ReadConsistency { + require(n >= 2, "ReadFrom n must be >= 2, use ReadLocal for n=1") + } + final case class ReadMajority(timeout: FiniteDuration) extends ReadConsistency + final case class ReadAll(timeout: FiniteDuration) extends ReadConsistency + + sealed trait WriteConsistency { + def timeout: FiniteDuration + } + case object WriteLocal extends WriteConsistency { + override def timeout: FiniteDuration = Duration.Zero + } + final case class WriteTo(n: Int, timeout: FiniteDuration) extends WriteConsistency { + require(n >= 2, "WriteTo n must be >= 2, use WriteLocal for n=1") + } + final case class WriteMajority(timeout: FiniteDuration) extends WriteConsistency + final case class WriteAll(timeout: FiniteDuration) extends WriteConsistency + + /** + * Java API: The `ReadLocal` instance + */ + def readLocal = ReadLocal + + /** + * Java API: The `WriteLocal` instance + */ + def writeLocal = WriteLocal + + /** + * INTERNAL API + */ + private[akka] case object GetKeyIds + + /** + * INTERNAL API + */ + private[akka] final case class GetKeyIdsResult(keyIds: Set[String]) { + /** + * Java API + */ + def getKeyIds: java.util.Set[String] = { + import scala.collection.JavaConverters._ + keyIds.asJava + } + } + + sealed trait Command[A <: ReplicatedData] { + def key: Key[A] + } + + /** + * Send this message to the local `Replicator` to retrieve a data value for the + * given `key`. The `Replicator` will reply with one of the [[GetResponse]] messages. + * + * The optional `request` context is included in the reply messages. This is a convenient + * way to pass contextual information (e.g. 
original sender) without having to use `ask` + * or maintain local correlation data structures. + */ + final case class Get[A <: ReplicatedData](key: Key[A], consistency: ReadConsistency, request: Option[Any] = None) + extends Command[A] with ReplicatorMessage { + /** + * Java API: `Get` value from local `Replicator`, i.e. `ReadLocal` consistency. + */ + def this(key: Key[A], consistency: ReadConsistency) = this(key, consistency, None) + } + sealed abstract class GetResponse[A <: ReplicatedData] extends NoSerializationVerificationNeeded { + def key: Key[A] + def request: Option[Any] + + /** Java API */ + def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) + } + /** + * Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. + */ + final case class GetSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any])(data: A) + extends GetResponse[A] with ReplicatorMessage { + + /** + * The data value, with correct type. + * Scala pattern matching cannot infer the type from the `key` parameter. + */ + def get[T <: ReplicatedData](key: Key[T]): T = { + require(key == this.key, "wrong key used, must use contained key") + data.asInstanceOf[T] + } + + /** + * The data value. Use [[#get]] to get the fully typed value. + */ + def dataValue: A = data + } + final case class NotFound[A <: ReplicatedData](key: Key[A], request: Option[Any]) + extends GetResponse[A] with ReplicatorMessage + /** + * The [[Get]] request could not be fulfill according to the given + * [[ReadConsistency consistency level]] and [[ReadConsistency#timeout timeout]]. + */ + final case class GetFailure[A <: ReplicatedData](key: Key[A], request: Option[Any]) + extends GetResponse[A] with ReplicatorMessage + + /** + * Register a subscriber that will be notified with a [[Changed]] message + * when the value of the given `key` is changed. Current value is also + * sent as a [[Changed]] message to a new subscriber. 
+ * + * Subscribers will be notified periodically with the configured `notify-subscribers-interval`, + * and it is also possible to send an explicit `FlushChanges` message to + * the `Replicator` to notify the subscribers immediately. + * + * The subscriber will automatically be unregistered if it is terminated. + * + * If the key is deleted the subscriber is notified with a [[DataDeleted]] + * message. + */ + final case class Subscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage + /** + * Unregister a subscriber. + * @see [[Replicator.Subscribe]] + */ + final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage + /** + * The data value is retrieved with [[#get]] using the typed key. + * @see [[Replicator.Subscribe]] + */ + final case class Changed[A <: ReplicatedData](key: Key[A])(data: A) extends ReplicatorMessage { + /** + * The data value, with correct type. + * Scala pattern matching cannot infer the type from the `key` parameter. + */ + def get[T <: ReplicatedData](key: Key[T]): T = { + require(key == this.key, "wrong key used, must use contained key") + data.asInstanceOf[T] + } + + /** + * The data value. Use [[#get]] to get the fully typed value. + */ + def dataValue: A = data + } + + object Update { + + /** + * Modify value of local `Replicator` and replicate with given `writeConsistency`. + * + * The current value for the `key` is passed to the `modify` function. + * If there is no current data value for the `key` the `initial` value will be + * passed to the `modify` function. + * + * The optional `request` context is included in the reply messages. This is a convenient + * way to pass contextual information (e.g. original sender) without having to use `ask` + * or local correlation data structures. 
+ */ + def apply[A <: ReplicatedData]( + key: Key[A], initial: A, writeConsistency: WriteConsistency, + request: Option[Any] = None)(modify: A ⇒ A): Update[A] = + Update(key, writeConsistency, request)(modifyWithInitial(initial, modify)) + + private def modifyWithInitial[A <: ReplicatedData](initial: A, modify: A ⇒ A): Option[A] ⇒ A = { + case Some(data) ⇒ modify(data) + case None ⇒ modify(initial) + } + } + /** + * Send this message to the local `Replicator` to update a data value for the + * given `key`. The `Replicator` will reply with one of the [[UpdateResponse]] messages. + * + * Note that the [[Replicator.Update$ companion]] object provides `apply` functions for convenient + * construction of this message. + * + * The current data value for the `key` is passed as parameter to the `modify` function. + * It is `None` if there is no value for the `key`, and otherwise `Some(data)`. The function + * is supposed to return the new value of the data, which will then be replicated according to + * the given `writeConsistency`. + * + * The `modify` function is called by the `Replicator` actor and must therefore be a pure + * function that only uses the data parameter and stable fields from enclosing scope. It must + * for example not access `sender()` reference of an enclosing actor. + */ + final case class Update[A <: ReplicatedData](key: Key[A], writeConsistency: WriteConsistency, + request: Option[Any])(val modify: Option[A] ⇒ A) + extends Command[A] with NoSerializationVerificationNeeded { + + /** + * Java API: Modify value of local `Replicator` and replicate with given `writeConsistency`. + * + * The current value for the `key` is passed to the `modify` function. + * If there is no current data value for the `key` the `initial` value will be + * passed to the `modify` function. 
+ */ + def this( + key: Key[A], initial: A, writeConsistency: WriteConsistency, modify: JFunction[A, A]) = + this(key, writeConsistency, None)(Update.modifyWithInitial(initial, data ⇒ modify.apply(data))) + + /** + * Java API: Modify value of local `Replicator` and replicate with given `writeConsistency`. + * + * The current value for the `key` is passed to the `modify` function. + * If there is no current data value for the `key` the `initial` value will be + * passed to the `modify` function. + * + * The optional `request` context is included in the reply messages. This is a convenient + * way to pass contextual information (e.g. original sender) without having to use `ask` + * or local correlation data structures. + */ + def this( + key: Key[A], initial: A, writeConsistency: WriteConsistency, request: Optional[Any], modify: JFunction[A, A]) = + this(key, writeConsistency, Option(request.orElse(null)))(Update.modifyWithInitial(initial, data ⇒ modify.apply(data))) + + } + + sealed abstract class UpdateResponse[A <: ReplicatedData] extends NoSerializationVerificationNeeded { + def key: Key[A] + def request: Option[Any] + + /** Java API */ + def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) + } + final case class UpdateSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends UpdateResponse[A] + sealed abstract class UpdateFailure[A <: ReplicatedData] extends UpdateResponse[A] + + /** + * The direct replication of the [[Update]] could not be fulfill according to + * the given [[WriteConsistency consistency level]] and + * [[WriteConsistency#timeout timeout]]. + * + * The `Update` was still performed locally and possibly replicated to some nodes. + * It will eventually be disseminated to other replicas, unless the local replica + * crashes before it has been able to communicate with other replicas. 
+ */ + final case class UpdateTimeout[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends UpdateFailure[A] + /** + * If the `modify` function of the [[Update]] throws an exception the reply message + * will be this `ModifyFailure` message. The original exception is included as `cause`. + */ + final case class ModifyFailure[A <: ReplicatedData](key: Key[A], errorMessage: String, cause: Throwable, request: Option[Any]) + extends UpdateFailure[A] { + override def toString: String = s"ModifyFailure [$key]: $errorMessage" + } + + /** + * Send this message to the local `Replicator` to delete a data value for the + * given `key`. The `Replicator` will reply with one of the [[DeleteResponse]] messages. + */ + final case class Delete[A <: ReplicatedData](key: Key[A], consistency: WriteConsistency) extends Command[A] + + sealed trait DeleteResponse[A <: ReplicatedData] { + def key: Key[A] + } + final case class DeleteSuccess[A <: ReplicatedData](key: Key[A]) extends DeleteResponse[A] + final case class ReplicationDeleteFailure[A <: ReplicatedData](key: Key[A]) extends DeleteResponse[A] + final case class DataDeleted[A <: ReplicatedData](key: Key[A]) + extends RuntimeException with NoStackTrace with DeleteResponse[A] { + override def toString: String = s"DataDeleted [$key]" + } + + /** + * Get current number of replicas, including the local replica. + * Will reply to sender with [[ReplicaCount]]. + */ + final case object GetReplicaCount + + /** + * Java API: The `GetReplicaCount` instance + */ + def getReplicaCount = GetReplicaCount + + /** + * Current number of replicas. Reply to `GetReplicaCount`. + */ + final case class ReplicaCount(n: Int) + + /** + * Notify subscribers of changes now, otherwise they will be notified periodically + * with the configured `notify-subscribers-interval`. 
+ */ + case object FlushChanges + + /** + * Java API: The `FlushChanges` instance + */ + def flushChanges = FlushChanges + + /** + * Marker trait for remote messages serialized by + * [[akka.cluster.ddata.protobuf.ReplicatorMessageSerializer]]. + */ + trait ReplicatorMessage extends Serializable + + /** + * INTERNAL API + */ + private[akka] object Internal { + + case object GossipTick + case object RemovedNodePruningTick + case object ClockTick + final case class Write(key: String, envelope: DataEnvelope) extends ReplicatorMessage + case object WriteAck extends ReplicatorMessage with DeadLetterSuppression + final case class Read(key: String) extends ReplicatorMessage + final case class ReadResult(envelope: Option[DataEnvelope]) extends ReplicatorMessage with DeadLetterSuppression + final case class ReadRepair(key: String, envelope: DataEnvelope) + case object ReadRepairAck + + // Gossip Status message contains SHA-1 digests of the data to determine when + // to send the full data + type Digest = ByteString + val DeletedDigest: Digest = ByteString.empty + val LazyDigest: Digest = ByteString(0) + val NotFoundDigest: Digest = ByteString(-1) + + final case class DataEnvelope( + data: ReplicatedData, + pruning: Map[UniqueAddress, PruningState] = Map.empty) + extends ReplicatorMessage { + + import PruningState._ + + def needPruningFrom(removedNode: UniqueAddress): Boolean = + data match { + case r: RemovedNodePruning ⇒ r.needPruningFrom(removedNode) + case _ ⇒ false + } + + def initRemovedNodePruning(removed: UniqueAddress, owner: UniqueAddress): DataEnvelope = { + copy(pruning = pruning.updated(removed, PruningState(owner, PruningInitialized(Set.empty)))) + } + + def prune(from: UniqueAddress): DataEnvelope = { + data match { + case dataWithRemovedNodePruning: RemovedNodePruning ⇒ + require(pruning.contains(from)) + val to = pruning(from).owner + val prunedData = dataWithRemovedNodePruning.prune(from, to) + copy(data = prunedData, pruning = pruning.updated(from, 
PruningState(to, PruningPerformed))) + case _ ⇒ this + } + + } + + def merge(other: DataEnvelope): DataEnvelope = + if (other.data == DeletedData) DeletedEnvelope + else { + var mergedRemovedNodePruning = other.pruning + for ((key, thisValue) ← pruning) { + mergedRemovedNodePruning.get(key) match { + case None ⇒ mergedRemovedNodePruning = mergedRemovedNodePruning.updated(key, thisValue) + case Some(thatValue) ⇒ + mergedRemovedNodePruning = mergedRemovedNodePruning.updated(key, thisValue merge thatValue) + } + } + + // cleanup both sides before merging, `merge((otherData: ReplicatedData)` will cleanup other.data + copy(data = cleaned(data, mergedRemovedNodePruning), pruning = mergedRemovedNodePruning).merge(other.data) + } + + def merge(otherData: ReplicatedData): DataEnvelope = + if (otherData == DeletedData) DeletedEnvelope + else copy(data = data merge cleaned(otherData, pruning).asInstanceOf[data.T]) + + private def cleaned(c: ReplicatedData, p: Map[UniqueAddress, PruningState]): ReplicatedData = p.foldLeft(c) { + case (c: RemovedNodePruning, (removed, PruningState(_, PruningPerformed))) ⇒ + if (c.needPruningFrom(removed)) c.pruningCleanup(removed) else c + case (c, _) ⇒ c + } + + def addSeen(node: Address): DataEnvelope = { + var changed = false + val newRemovedNodePruning = pruning.map { + case (removed, pruningState) ⇒ + val newPruningState = pruningState.addSeen(node) + changed = (newPruningState ne pruningState) || changed + (removed, newPruningState) + } + if (changed) copy(pruning = newRemovedNodePruning) + else this + } + } + + val DeletedEnvelope = DataEnvelope(DeletedData) + + case object DeletedData extends ReplicatedData with ReplicatedDataSerialization { + type T = ReplicatedData + override def merge(that: ReplicatedData): ReplicatedData = DeletedData + } + + final case class Status(digests: Map[String, Digest], chunk: Int, totChunks: Int) extends ReplicatorMessage { + override def toString: String = + (digests.map { + case (key, bytes) ⇒ key + " -> 
" + bytes.map(byte ⇒ f"$byte%02x").mkString("") + }).mkString("Status(", ", ", ")") + } + final case class Gossip(updatedData: Map[String, DataEnvelope], sendBack: Boolean) extends ReplicatorMessage + + } +} + +/** + * A replicated in-memory data store supporting low latency and high availability + * requirements. + * + * The `Replicator` actor takes care of direct replication and gossip based + * dissemination of Conflict Free Replicated Data Types (CRDTs) to replicas in the + * the cluster. + * The data types must be convergent CRDTs and implement [[ReplicatedData]], i.e. + * they provide a monotonic merge function and the state changes always converge. + * + * You can use your own custom [[ReplicatedData]] types, and several types are provided + * by this package, such as: + * + *
+ * <ul>
+ * <li>Counters: [[GCounter]], [[PNCounter]]</li>
+ * <li>Registers: [[LWWRegister]], [[Flag]]</li>
+ * <li>Sets: [[GSet]], [[ORSet]]</li>
+ * <li>Maps: [[ORMap]], [[LWWMap]], [[PNCounterMap]]</li>
+ * </ul>
+ * + * For good introduction to the CRDT subject watch the + * The Final Causal Frontier + * and Eventually Consistent Data Structures + * talk by Sean Cribbs and and the + * talk by Mark Shapiro + * and read the excellent paper + * A comprehensive study of Convergent and Commutative Replicated Data Types + * by Mark Shapiro et. al. + * + * The `Replicator` actor must be started on each node in the cluster, or group of + * nodes tagged with a specific role. It communicates with other `Replicator` instances + * with the same path (without address) that are running on other nodes . For convenience it + * can be used with the [[DistributedData]] extension. + * + * == Update == + * + * To modify and replicate a [[ReplicatedData]] value you send a [[Replicator.Update]] message + * to the the local `Replicator`. + * The current data value for the `key` of the `Update` is passed as parameter to the `modify` + * function of the `Update`. The function is supposed to return the new value of the data, which + * will then be replicated according to the given consistency level. + * + * The `modify` function is called by the `Replicator` actor and must therefore be a pure + * function that only uses the data parameter and stable fields from enclosing scope. It must + * for example not access `sender()` reference of an enclosing actor. + * + * `Update` is intended to only be sent from an actor running in same local `ActorSystem` as + * the `Replicator`, because the `modify` function is typically not serializable. + * + * You supply a write consistency level which has the following meaning: + *
+ * <ul>
+ * <li>`WriteLocal` the value will immediately only be written to the local replica,
+ *     and later disseminated with gossip</li>
+ * <li>`WriteTo(n)` the value will immediately be written to at least `n` replicas,
+ *     including the local replica</li>
+ * <li>`WriteMajority` the value will immediately be written to a majority of replicas, i.e.
+ *     at least `N/2 + 1` replicas, where N is the number of nodes in the cluster
+ *     (or cluster role group)</li>
+ * <li>`WriteAll` the value will immediately be written to all nodes in the cluster
+ *     (or all nodes in the cluster role group)</li>
+ * </ul>
+ * + * As reply of the `Update` a [[Replicator.UpdateSuccess]] is sent to the sender of the + * `Update` if the value was successfully replicated according to the supplied consistency + * level within the supplied timeout. Otherwise a [[Replicator.UpdateFailure]] subclass is + * sent back. Note that a [[Replicator.UpdateTimeout]] reply does not mean that the update completely failed + * or was rolled back. It may still have been replicated to some nodes, and will eventually + * be replicated to all nodes with the gossip protocol. + * + * You will always see your own writes. For example if you send two `Update` messages + * changing the value of the same `key`, the `modify` function of the second message will + * see the change that was performed by the first `Update` message. + * + * In the `Update` message you can pass an optional request context, which the `Replicator` + * does not care about, but is included in the reply messages. This is a convenient + * way to pass contextual information (e.g. original sender) without having to use `ask` + * or local correlation data structures. + * + * == Get == + * + * To retrieve the current value of a data you send [[Replicator.Get]] message to the + * `Replicator`. You supply a consistency level which has the following meaning: + *
+ * <ul>
+ * <li>`ReadLocal` the value will only be read from the local replica</li>
+ * <li>`ReadFrom(n)` the value will be read and merged from `n` replicas,
+ *     including the local replica</li>
+ * <li>`ReadMajority` the value will be read and merged from a majority of replicas, i.e.
+ *     at least `N/2 + 1` replicas, where N is the number of nodes in the cluster
+ *     (or cluster role group)</li>
+ * <li>`ReadAll` the value will be read and merged from all nodes in the cluster
+ *     (or all nodes in the cluster role group)</li>
+ * </ul>
+ * + * As reply of the `Get` a [[Replicator.GetSuccess]] is sent to the sender of the + * `Get` if the value was successfully retrieved according to the supplied consistency + * level within the supplied timeout. Otherwise a [[Replicator.GetFailure]] is sent. + * If the key does not exist the reply will be [[Replicator.NotFound]]. + * + * You will always read your own writes. For example if you send a `Update` message + * followed by a `Get` of the same `key` the `Get` will retrieve the change that was + * performed by the preceding `Update` message. However, the order of the reply messages are + * not defined, i.e. in the previous example you may receive the `GetSuccess` before + * the `UpdateSuccess`. + * + * In the `Get` message you can pass an optional request context in the same way as for the + * `Update` message, described above. For example the original sender can be passed and replied + * to after receiving and transforming `GetSuccess`. + * + * == Subscribe == + * + * You may also register interest in change notifications by sending [[Replicator.Subscribe]] + * message to the `Replicator`. It will send [[Replicator.Changed]] messages to the registered + * subscriber when the data for the subscribed key is updated. Subscribers will be notified + * periodically with the configured `notify-subscribers-interval`, and it is also possible to + * send an explicit `Replicator.FlushChanges` message to the `Replicator` to notify the subscribers + * immediately. + * + * The subscriber is automatically removed if the subscriber is terminated. A subscriber can + * also be deregistered with the [[Replicator.Unsubscribe]] message. + * + * == Delete == + * + * A data entry can be deleted by sending a [[Replicator.Delete]] message to the local + * local `Replicator`. 
As reply of the `Delete` a [[Replicator.DeleteSuccess]] is sent to + * the sender of the `Delete` if the value was successfully deleted according to the supplied + * consistency level within the supplied timeout. Otherwise a [[Replicator.ReplicationDeleteFailure]] + * is sent. Note that `ReplicationDeleteFailure` does not mean that the delete completely failed or + * was rolled back. It may still have been replicated to some nodes, and may eventually be replicated + * to all nodes. + * + * A deleted key cannot be reused again, but it is still recommended to delete unused + * data entries because that reduces the replication overhead when new nodes join the cluster. + * Subsequent `Delete`, `Update` and `Get` requests will be replied with [[Replicator.DataDeleted]]. + * Subscribers will receive [[Replicator.DataDeleted]]. + * + * == CRDT Garbage == + * + * One thing that can be problematic with CRDTs is that some data types accumulate history (garbage). + * For example a `GCounter` keeps track of one counter per node. If a `GCounter` has been updated + * from one node it will associate the identifier of that node forever. That can become a problem + * for long running systems with many cluster nodes being added and removed. To solve this problem + * the `Replicator` performs pruning of data associated with nodes that have been removed from the + * cluster. Data types that need pruning have to implement [[RemovedNodePruning]]. The pruning consists + * of several steps: + *
+ * <ol>
+ * <li>When a node is removed from the cluster it is first important that all updates that were
+ *     done by that node are disseminated to all other nodes. The pruning will not start before the
+ *     `maxPruningDissemination` duration has elapsed. The time measurement is stopped when any
+ *     replica is unreachable, so it should be configured to worst case in a healthy cluster.</li>
+ * <li>The nodes are ordered by their address and the node ordered first is called leader.
+ *     The leader initiates the pruning by adding a `PruningInitialized` marker in the data envelope.
+ *     This is gossiped to all other nodes and they mark it as seen when they receive it.</li>
+ * <li>When the leader sees that all other nodes have seen the `PruningInitialized` marker
+ *     the leader performs the pruning and changes the marker to `PruningPerformed` so that nobody
+ *     else will redo the pruning. The data envelope with this pruning state is a CRDT itself.
+ *     The pruning is typically performed by "moving" the part of the data associated with
+ *     the removed node to the leader node. For example, a `GCounter` is a `Map` with the node as key
+ *     and the counts done by that node as value. When pruning the value of the removed node is
+ *     moved to the entry owned by the leader node. See [[RemovedNodePruning#prune]].</li>
+ * <li>Thereafter the data is always cleared from parts associated with the removed node so that
+ *     it does not come back when merging. See [[RemovedNodePruning#pruningCleanup]]</li>
+ * <li>After another `maxPruningDissemination` duration after pruning the last entry from the
+ *     removed node the `PruningPerformed` markers in the data envelope are collapsed into a
+ *     single tombstone entry, for efficiency. Clients may continue to use old data and therefore
+ *     all data are always cleared from parts associated with tombstoned nodes.</li>
+ * </ol>
+ */ +final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLogging { + + import Replicator._ + import Replicator.Internal._ + import PruningState._ + import settings._ + + val cluster = Cluster(context.system) + val selfAddress = cluster.selfAddress + val selfUniqueAddress = cluster.selfUniqueAddress + + require(!cluster.isTerminated, "Cluster node must not be terminated") + require(role.forall(cluster.selfRoles.contains), + s"This cluster member [${selfAddress}] doesn't have the role [$role]") + + //Start periodic gossip to random nodes in cluster + import context.dispatcher + val gossipTask = context.system.scheduler.schedule(gossipInterval, gossipInterval, self, GossipTick) + val notifyTask = context.system.scheduler.schedule(notifySubscribersInterval, notifySubscribersInterval, self, FlushChanges) + val pruningTask = context.system.scheduler.schedule(pruningInterval, pruningInterval, self, RemovedNodePruningTick) + val clockTask = context.system.scheduler.schedule(gossipInterval, gossipInterval, self, ClockTick) + + val serializer = SerializationExtension(context.system).serializerFor(classOf[DataEnvelope]) + val maxPruningDisseminationNanos = maxPruningDissemination.toNanos + + // cluster nodes, doesn't contain selfAddress + var nodes: Set[Address] = Set.empty + + // nodes removed from cluster, to be pruned, and tombstoned + var removedNodes: Map[UniqueAddress, Long] = Map.empty + var pruningPerformed: Map[UniqueAddress, Long] = Map.empty + var tombstoneNodes: Set[UniqueAddress] = Set.empty + + var leader: Option[Address] = None + def isLeader: Boolean = leader.exists(_ == selfAddress) + + // for pruning timeouts are based on clock that is only increased when all nodes are reachable + var previousClockTime = System.nanoTime() + var allReachableClockTime = 0L + var unreachable = Set.empty[Address] + + // the actual data + var dataEntries = Map.empty[String, (DataEnvelope, Digest)] + // keys that have changed, Changed event published to 
subscribers on FlushChanges + var changed = Set.empty[String] + + // for splitting up gossip in chunks + var statusCount = 0L + var statusTotChunks = 0 + + val subscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef] + val newSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef] + var subscriptionKeys = Map.empty[String, KeyR] + + override def preStart(): Unit = { + val leaderChangedClass = if (role.isDefined) classOf[RoleLeaderChanged] else classOf[LeaderChanged] + cluster.subscribe(self, initialStateMode = InitialStateAsEvents, + classOf[MemberEvent], classOf[ReachabilityEvent], leaderChangedClass) + } + + override def postStop(): Unit = { + cluster.unsubscribe(self) + gossipTask.cancel() + notifyTask.cancel() + pruningTask.cancel() + clockTask.cancel() + } + + def matchingRole(m: Member): Boolean = role.forall(m.hasRole) + + def receive = normalReceive + + val normalReceive: Receive = { + case Get(key, consistency, req) ⇒ receiveGet(key, consistency, req) + case u @ Update(key, writeC, req) ⇒ receiveUpdate(key, u.modify, writeC, req) + case Read(key) ⇒ receiveRead(key) + case Write(key, envelope) ⇒ receiveWrite(key, envelope) + case ReadRepair(key, envelope) ⇒ receiveReadRepair(key, envelope) + case FlushChanges ⇒ receiveFlushChanges() + case GossipTick ⇒ receiveGossipTick() + case ClockTick ⇒ receiveClockTick() + case Status(otherDigests, chunk, totChunks) ⇒ receiveStatus(otherDigests, chunk, totChunks) + case Gossip(updatedData, sendBack) ⇒ receiveGossip(updatedData, sendBack) + case Subscribe(key, subscriber) ⇒ receiveSubscribe(key, subscriber) + case Unsubscribe(key, subscriber) ⇒ receiveUnsubscribe(key, subscriber) + case Terminated(ref) ⇒ receiveTerminated(ref) + case MemberUp(m) ⇒ receiveMemberUp(m) + case MemberRemoved(m, _) ⇒ receiveMemberRemoved(m) + case _: MemberEvent ⇒ // not of interest + case UnreachableMember(m) ⇒ receiveUnreachable(m) + case 
ReachableMember(m) ⇒ receiveReachable(m) + case LeaderChanged(leader) ⇒ receiveLeaderChanged(leader, None) + case RoleLeaderChanged(role, leader) ⇒ receiveLeaderChanged(leader, Some(role)) + case GetKeyIds ⇒ receiveGetKeyIds() + case Delete(key, consistency) ⇒ receiveDelete(key, consistency) + case RemovedNodePruningTick ⇒ receiveRemovedNodePruningTick() + case GetReplicaCount ⇒ receiveGetReplicaCount() + } + + def receiveGet(key: KeyR, consistency: ReadConsistency, req: Option[Any]): Unit = { + val localValue = getData(key.id) + log.debug("Received Get for key [{}], local data [{}]", key, localValue) + if (isLocalGet(consistency)) { + val reply = localValue match { + case Some(DataEnvelope(DeletedData, _)) ⇒ DataDeleted(key) + case Some(DataEnvelope(data, _)) ⇒ GetSuccess(key, req)(data) + case None ⇒ NotFound(key, req) + } + sender() ! reply + } else + context.actorOf(ReadAggregator.props(key, consistency, req, nodes, localValue, sender()) + .withDispatcher(context.props.dispatcher)) + } + + def isLocalGet(readConsistency: ReadConsistency): Boolean = + readConsistency match { + case ReadLocal ⇒ true + case _: ReadMajority | _: ReadAll ⇒ nodes.isEmpty + case _ ⇒ false + } + + def receiveRead(key: String): Unit = { + sender() ! 
ReadResult(getData(key)) + } + + def isLocalSender(): Boolean = !sender().path.address.hasGlobalScope + + def receiveUpdate(key: KeyR, modify: Option[ReplicatedData] ⇒ ReplicatedData, + writeConsistency: WriteConsistency, req: Option[Any]): Unit = { + val localValue = getData(key.id) + Try { + localValue match { + case Some(DataEnvelope(DeletedData, _)) ⇒ throw new DataDeleted(key) + case Some(envelope @ DataEnvelope(existing, _)) ⇒ modify(Some(existing)) + case None ⇒ modify(None) + } + } match { + case Success(newData) ⇒ + log.debug("Received Update for key [{}], old data [{}], new data [{}]", key, localValue, newData) + val envelope = DataEnvelope(pruningCleanupTombstoned(newData)) + setData(key.id, envelope) + if (isLocalUpdate(writeConsistency)) + sender() ! UpdateSuccess(key, req) + else + context.actorOf(WriteAggregator.props(key, envelope, writeConsistency, req, nodes, sender()) + .withDispatcher(context.props.dispatcher)) + case Failure(e: DataDeleted[_]) ⇒ + log.debug("Received Update for deleted key [{}]", key) + sender() ! e + case Failure(e) ⇒ + log.debug("Received Update for key [{}], failed: {}", key, e.getMessage) + sender() ! ModifyFailure(key, "Update failed: " + e.getMessage, e, req) + } + } + + def isLocalUpdate(writeConsistency: WriteConsistency): Boolean = + writeConsistency match { + case WriteLocal ⇒ true + case _: WriteMajority | _: WriteAll ⇒ nodes.isEmpty + case _ ⇒ false + } + + def receiveWrite(key: String, envelope: DataEnvelope): Unit = { + write(key, envelope) + sender() ! 
// --- Replicator internals (reconstructed from a mangled patch chunk); these
// --- members live inside the Replicator actor class, whose header is above.

  /**
   * Merges an incoming data envelope into the locally stored envelope for `key`.
   * Deleted entries win permanently; a write carrying a different concrete data
   * type than the stored one is rejected with a warning (DeletedData excepted).
   */
  def write(key: String, writeEnvelope: DataEnvelope): Unit =
    getData(key) match {
      case Some(DataEnvelope(DeletedData, _)) ⇒ // already deleted: deletes are permanent
      case Some(envelope @ DataEnvelope(existing, _)) ⇒
        if (existing.getClass == writeEnvelope.data.getClass || writeEnvelope.data == DeletedData) {
          // clean out data from tombstoned nodes before merging, and record
          // that this replica has seen the merged value
          val merged = envelope.merge(pruningCleanupTombstoned(writeEnvelope)).addSeen(selfAddress)
          setData(key, merged)
        } else {
          log.warning("Wrong type for writing [{}], existing type [{}], got [{}]",
            key, existing.getClass.getName, writeEnvelope.data.getClass.getName)
        }
      case None ⇒
        setData(key, pruningCleanupTombstoned(writeEnvelope).addSeen(selfAddress))
    }

  /** Applies a read-repair write and acks to the sender. */
  def receiveReadRepair(key: String, writeEnvelope: DataEnvelope): Unit = {
    write(key, writeEnvelope)
    sender() ! ReadRepairAck
  }

  /** Replies with the ids of all non-deleted entries. */
  def receiveGetKeyIds(): Unit = {
    val keys: Set[String] = dataEntries.collect {
      case (key, (DataEnvelope(data, _), _)) if data != DeletedData ⇒ key
    }(collection.breakOut)
    sender() ! GetKeyIdsResult(keys)
  }

  /**
   * Marks `key` as deleted and replicates the tombstone according to
   * `consistency`; replies DataDeleted if it was already deleted.
   */
  def receiveDelete(key: KeyR, consistency: WriteConsistency): Unit = {
    getData(key.id) match {
      case Some(DataEnvelope(DeletedData, _)) ⇒
        // already deleted
        sender() ! DataDeleted(key)
      case _ ⇒
        setData(key.id, DeletedEnvelope)
        if (isLocalUpdate(consistency))
          sender() ! DeleteSuccess(key)
        else
          context.actorOf(WriteAggregator.props(key, DeletedEnvelope, consistency, None, nodes, sender())
            .withDispatcher(context.props.dispatcher))
    }
  }

  /** Stores `envelope` and schedules change notification; the digest is computed lazily. */
  def setData(key: String, envelope: DataEnvelope): Unit = {
    // notify subscribers, later (in receiveFlushChanges)
    changed += key

    val dig = if (envelope.data == DeletedData) DeletedDigest else LazyDigest
    dataEntries = dataEntries.updated(key, (envelope, dig))
  }

  /**
   * Returns the digest of the entry, computing and caching it on first access.
   * NotFoundDigest when the key does not exist.
   */
  def getDigest(key: String): Digest = {
    dataEntries.get(key) match {
      case Some((envelope, LazyDigest)) ⇒
        val d = digest(envelope)
        dataEntries = dataEntries.updated(key, (envelope, d))
        d
      case Some((_, digest)) ⇒ digest
      case None              ⇒ NotFoundDigest
    }
  }

  /** SHA-1 digest of the serialized envelope, used for gossip status comparison. */
  def digest(envelope: DataEnvelope): Digest = {
    val bytes = serializer.toBinary(envelope)
    ByteString.fromArray(MessageDigest.getInstance("SHA-1").digest(bytes))
  }

  def getData(key: String): Option[DataEnvelope] = dataEntries.get(key).map { case (envelope, _) ⇒ envelope }

  /**
   * Delivers Changed/DataDeleted notifications for changed keys to their
   * subscribers, and the current value to newly registered subscribers.
   */
  def receiveFlushChanges(): Unit = {
    def notify(keyId: String, subs: mutable.Set[ActorRef]): Unit = {
      val key = subscriptionKeys(keyId)
      getData(keyId) match {
        case Some(envelope) ⇒
          val msg = if (envelope.data == DeletedData) DataDeleted(key) else Changed(key)(envelope.data)
          subs.foreach { _ ! msg }
        case None ⇒
      }
    }

    if (subscribers.nonEmpty) {
      for (key ← changed; if subscribers.contains(key); subs ← subscribers.get(key))
        notify(key, subs)
    }

    // Changed event is sent to new subscribers even though the key has not changed,
    // i.e. send current value
    if (newSubscribers.nonEmpty) {
      for ((key, subs) ← newSubscribers) {
        notify(key, subs)
        subs.foreach { subscribers.addBinding(key, _) }
      }
      newSubscribers.clear()
    }

    changed = Set.empty[String]
  }

  def receiveGossipTick(): Unit = selectRandomNode(nodes.toVector) foreach gossipTo

  /**
   * Sends a gossip Status (key -> digest) to `address`. When there are more
   * entries than maxDeltaElements the key space is partitioned into chunks by
   * abs(key.hashCode) % totChunks and up to 10 chunks are sent per tick.
   */
  def gossipTo(address: Address): Unit = {
    val to = replica(address)
    if (dataEntries.size <= maxDeltaElements) {
      val status = Status(dataEntries.map { case (key, (_, _)) ⇒ (key, getDigest(key)) }, chunk = 0, totChunks = 1)
      to ! status
    } else {
      val totChunks = dataEntries.size / maxDeltaElements
      for (_ ← 1 to math.min(totChunks, 10)) {
        if (totChunks == statusTotChunks)
          statusCount += 1
        else {
          // chunk layout changed; restart from a random chunk
          statusCount = ThreadLocalRandom.current.nextInt(0, totChunks)
          statusTotChunks = totChunks
        }
        val chunk = (statusCount % totChunks).toInt
        val status = Status(dataEntries.collect {
          case (key, (_, _)) if math.abs(key.hashCode) % totChunks == chunk ⇒ (key, getDigest(key))
        }, chunk, totChunks)
        to ! status
      }
    }
  }

  def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] =
    if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size))

  def replica(address: Address): ActorSelection =
    context.actorSelection(self.path.toStringWithAddress(address))

  /**
   * Handles a gossip Status from a peer: replies with Gossip for keys where we
   * differ or the peer is missing data, and with a Status requesting the keys
   * that we are missing ourselves.
   */
  def receiveStatus(otherDigests: Map[String, Digest], chunk: Int, totChunks: Int): Unit = {
    if (log.isDebugEnabled)
      log.debug("Received gossip status from [{}], chunk [{}] of [{}] containing [{}]", sender().path.address,
        chunk, totChunks, otherDigests.keys.mkString(", "))

    def isOtherDifferent(key: String, otherDigest: Digest): Boolean = {
      val d = getDigest(key)
      d != NotFoundDigest && d != otherDigest
    }
    val otherDifferentKeys = otherDigests.collect {
      case (key, otherDigest) if isOtherDifferent(key, otherDigest) ⇒ key
    }
    val otherKeys = otherDigests.keySet
    val myKeys =
      if (totChunks == 1) dataEntries.keySet
      // FIX: use math.abs to match the chunk selection in gossipTo; a raw
      // (possibly negative) hashCode % totChunks would never equal `chunk`,
      // silently excluding those keys from the comparison.
      else dataEntries.keysIterator.filter(key ⇒ math.abs(key.hashCode) % totChunks == chunk).toSet
    val otherMissingKeys = myKeys -- otherKeys
    val keys = (otherDifferentKeys ++ otherMissingKeys).take(maxDeltaElements)
    if (keys.nonEmpty) {
      if (log.isDebugEnabled)
        log.debug("Sending gossip to [{}], containing [{}]", sender().path.address, keys.mkString(", "))
      val g = Gossip(keys.map(k ⇒ k -> getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty)
      sender() ! g
    }
    val myMissingKeys = otherKeys -- myKeys
    if (myMissingKeys.nonEmpty) {
      if (log.isDebugEnabled)
        log.debug("Sending gossip status to [{}], requesting missing [{}]", sender().path.address, myMissingKeys.mkString(", "))
      val status = Status(myMissingKeys.map(k ⇒ k -> NotFoundDigest)(collection.breakOut), chunk, totChunks)
      sender() ! status
    }
  }

  /** Merges gossiped entries; optionally replies with our (possibly newer) values. */
  def receiveGossip(updatedData: Map[String, DataEnvelope], sendBack: Boolean): Unit = {
    if (log.isDebugEnabled)
      log.debug("Received gossip from [{}], containing [{}]", sender().path.address, updatedData.keys.mkString(", "))
    var replyData = Map.empty[String, DataEnvelope]
    updatedData.foreach {
      case (key, envelope) ⇒
        val hadData = dataEntries.contains(key)
        write(key, envelope)
        if (sendBack) getData(key) match {
          case Some(d) ⇒
            if (hadData || d.pruning.nonEmpty)
              replyData = replyData.updated(key, d)
          case None ⇒
        }
    }
    if (sendBack && replyData.nonEmpty)
      sender() ! Gossip(replyData, sendBack = false)
  }

  /** Registers a subscriber; it receives the current value at the next flush. */
  def receiveSubscribe(key: KeyR, subscriber: ActorRef): Unit = {
    newSubscribers.addBinding(key.id, subscriber)
    if (!subscriptionKeys.contains(key.id))
      subscriptionKeys = subscriptionKeys.updated(key.id, key)
    context.watch(subscriber)
  }

  def receiveUnsubscribe(key: KeyR, subscriber: ActorRef): Unit = {
    subscribers.removeBinding(key.id, subscriber)
    newSubscribers.removeBinding(key.id, subscriber)
    if (!hasSubscriber(subscriber))
      context.unwatch(subscriber)
    if (!subscribers.contains(key.id) && !newSubscribers.contains(key.id))
      subscriptionKeys -= key.id
  }

  def hasSubscriber(subscriber: ActorRef): Boolean =
    (subscribers.exists { case (_, s) ⇒ s.contains(subscriber) }) ||
      (newSubscribers.exists { case (_, s) ⇒ s.contains(subscriber) })

  /** Removes a terminated subscriber from all bindings. */
  def receiveTerminated(ref: ActorRef): Unit = {
    val keys1 = subscribers.collect { case (k, s) if s.contains(ref) ⇒ k }
    keys1.foreach { key ⇒ subscribers.removeBinding(key, ref) }
    val keys2 = newSubscribers.collect { case (k, s) if s.contains(ref) ⇒ k }
    keys2.foreach { key ⇒ newSubscribers.removeBinding(key, ref) }

    (keys1 ++ keys2).foreach { key ⇒
      if (!subscribers.contains(key) && !newSubscribers.contains(key))
        subscriptionKeys -= key
    }
  }
// --- Replicator membership & pruning internals (reconstructed), the class
// --- close, and the ReadWriteAggregator base used by the aggregators below.

  /** Tracks new cluster members with a matching role; selfAddress is excluded. */
  def receiveMemberUp(m: Member): Unit =
    if (matchingRole(m) && m.address != selfAddress)
      nodes += m.address

  /** Stops on self-removal; otherwise records the removed node for later pruning. */
  def receiveMemberRemoved(m: Member): Unit = {
    if (m.address == selfAddress)
      context stop self
    else if (matchingRole(m)) {
      nodes -= m.address
      removedNodes = removedNodes.updated(m.uniqueAddress, allReachableClockTime)
      unreachable -= m.address
    }
  }

  def receiveUnreachable(m: Member): Unit =
    if (matchingRole(m)) unreachable += m.address

  def receiveReachable(m: Member): Unit =
    if (matchingRole(m)) unreachable -= m.address

  def receiveLeaderChanged(leaderOption: Option[Address], roleOption: Option[String]): Unit =
    if (roleOption == role) leader = leaderOption

  /** Advances the clock that only accrues time while every node is reachable. */
  def receiveClockTick(): Unit = {
    val now = System.nanoTime()
    if (unreachable.isEmpty)
      allReachableClockTime += (now - previousClockTime)
    previousClockTime = now
  }

  def receiveRemovedNodePruningTick(): Unit = {
    if (isLeader && removedNodes.nonEmpty) {
      initRemovedNodePruning()
    }
    performRemovedNodePruning()
    tombstoneRemovedNodePruning()
  }

  /**
   * Leader-only: starts pruning of data associated with nodes that were removed
   * from the cluster more than maxPruningDissemination (reachable time) ago.
   */
  def initRemovedNodePruning(): Unit = {
    // initiate pruning for removed nodes
    val removedSet: Set[UniqueAddress] = removedNodes.collect {
      case (r, t) if ((allReachableClockTime - t) > maxPruningDisseminationNanos) ⇒ r
    }(collection.breakOut)

    if (removedSet.nonEmpty) {
      for ((key, (envelope, _)) ← dataEntries; removed ← removedSet) {

        def init(): Unit = {
          val newEnvelope = envelope.initRemovedNodePruning(removed, selfUniqueAddress)
          log.debug("Initiated pruning of [{}] for data key [{}]", removed, key)
          setData(key, newEnvelope)
        }

        if (envelope.needPruningFrom(removed)) {
          envelope.data match {
            case _: RemovedNodePruning ⇒ // only the type check matters here
              envelope.pruning.get(removed) match {
                case None ⇒ init()
                case Some(PruningState(owner, PruningInitialized(_))) if owner != selfUniqueAddress ⇒ init()
                case _ ⇒ // already in progress
              }
            case _ ⇒
          }
        }
      }
    }
  }

  /** Performs the pruning step once all (reachable) nodes have seen the init. */
  def performRemovedNodePruning(): Unit = {
    // perform pruning when all seen Init
    dataEntries.foreach {
      case (key, (envelope @ DataEnvelope(_: RemovedNodePruning, pruning), _)) ⇒
        pruning.foreach {
          case (removed, PruningState(owner, PruningInitialized(seen))) if owner == selfUniqueAddress
            && (nodes.isEmpty || nodes.forall(seen)) ⇒
            val newEnvelope = envelope.prune(removed)
            pruningPerformed = pruningPerformed.updated(removed, allReachableClockTime)
            log.debug("Perform pruning of [{}] from [{}] to [{}]", key, removed, selfUniqueAddress)
            setData(key, newEnvelope)
          case _ ⇒
        }
      case _ ⇒ // deleted, or pruning not needed
    }
  }

  /**
   * After pruning has been performed for all entries and the dissemination delay
   * has passed, the removed node is tombstoned and its traces cleaned everywhere.
   */
  def tombstoneRemovedNodePruning(): Unit = {

    def allPruningPerformed(removed: UniqueAddress): Boolean = {
      dataEntries forall {
        case (_, (DataEnvelope(_: RemovedNodePruning, pruning), _)) ⇒
          pruning.get(removed) match {
            case Some(PruningState(_, PruningInitialized(_))) ⇒ false
            case _ ⇒ true
          }
        case _ ⇒ true // deleted, or pruning not needed
      }
    }

    pruningPerformed.foreach {
      case (removed, timestamp) if ((allReachableClockTime - timestamp) > maxPruningDisseminationNanos) &&
        allPruningPerformed(removed) ⇒
        log.debug("All pruning performed for [{}], tombstoned", removed)
        pruningPerformed -= removed
        removedNodes -= removed
        tombstoneNodes += removed
        dataEntries.foreach {
          case (key, (envelope @ DataEnvelope(_: RemovedNodePruning, _), _)) ⇒
            setData(key, pruningCleanupTombstoned(removed, envelope))
          case _ ⇒ // deleted, or pruning not needed
        }
      case _ ⇒ // not ready yet
    }
  }

  /** Cleans data of all tombstoned nodes from the envelope. */
  def pruningCleanupTombstoned(envelope: DataEnvelope): DataEnvelope =
    tombstoneNodes.foldLeft(envelope)((c, removed) ⇒ pruningCleanupTombstoned(removed, c))

  def pruningCleanupTombstoned(removed: UniqueAddress, envelope: DataEnvelope): DataEnvelope = {
    val cleaned = pruningCleanupTombstoned(removed, envelope.data)
    if ((cleaned ne envelope.data) || envelope.pruning.contains(removed))
      envelope.copy(data = cleaned, pruning = envelope.pruning - removed)
    else
      envelope
  }

  def pruningCleanupTombstoned(data: ReplicatedData): ReplicatedData =
    if (tombstoneNodes.isEmpty) data
    else tombstoneNodes.foldLeft(data)((c, removed) ⇒ pruningCleanupTombstoned(removed, c))

  def pruningCleanupTombstoned(removed: UniqueAddress, data: ReplicatedData): ReplicatedData =
    data match {
      case d: RemovedNodePruning ⇒
        if (d.needPruningFrom(removed)) d.pruningCleanup(removed) else data
      case _ ⇒ data
    }

  def receiveGetReplicaCount(): Unit = {
    // selfAddress is not included in the set
    sender() ! ReplicaCount(nodes.size + 1)
  }

}

/**
 * INTERNAL API
 */
private[akka] object ReadWriteAggregator {
  case object SendToSecondary
  val MaxSecondaryNodes = 10
}

/**
 * INTERNAL API
 *
 * Common scheduling and bookkeeping for WriteAggregator and ReadAggregator:
 * primary nodes are contacted immediately, secondaries after timeout/5, and
 * the whole attempt gives up at `timeout`.
 */
private[akka] abstract class ReadWriteAggregator extends Actor {
  import Replicator.Internal._
  import ReadWriteAggregator._

  def timeout: FiniteDuration
  def nodes: Set[Address]

  import context.dispatcher
  var sendToSecondarySchedule = context.system.scheduler.scheduleOnce(timeout / 5, self, SendToSecondary)
  var timeoutSchedule = context.system.scheduler.scheduleOnce(timeout, self, ReceiveTimeout)

  var remaining = nodes

  def doneWhenRemainingSize: Int

  // primaries are the minimum set needed to reach the consistency level; a
  // bounded number of secondaries are only contacted if primaries don't suffice
  lazy val (primaryNodes, secondaryNodes) = {
    val primarySize = nodes.size - doneWhenRemainingSize
    if (primarySize >= nodes.size)
      (nodes, Set.empty[Address])
    else {
      val (p, s) = scala.util.Random.shuffle(nodes.toVector).splitAt(primarySize)
      (p, s.take(MaxSecondaryNodes))
    }
  }

  override def postStop(): Unit = {
    sendToSecondarySchedule.cancel()
    timeoutSchedule.cancel()
  }

  def replica(address: Address): ActorSelection =
    context.actorSelection(context.parent.path.toStringWithAddress(address))

}
/**
 * INTERNAL API
 */
private[akka] object WriteAggregator {
  def props(
    key: KeyR,
    envelope: Replicator.Internal.DataEnvelope,
    consistency: Replicator.WriteConsistency,
    req: Option[Any],
    nodes: Set[Address],
    replyTo: ActorRef): Props =
    Props(new WriteAggregator(key, envelope, consistency, req, nodes, replyTo))
      .withDeploy(Deploy.local)
}

/**
 * INTERNAL API
 *
 * Replicates one write (or delete) to enough nodes to satisfy the requested
 * WriteConsistency, then reports success/failure to `replyTo` and stops.
 */
private[akka] class WriteAggregator(
  key: KeyR,
  envelope: Replicator.Internal.DataEnvelope,
  consistency: Replicator.WriteConsistency,
  req: Option[Any],
  override val nodes: Set[Address],
  replyTo: ActorRef) extends ReadWriteAggregator {

  import Replicator._
  import Replicator.Internal._
  import ReadWriteAggregator._

  override def timeout: FiniteDuration = consistency.timeout

  override val doneWhenRemainingSize = consistency match {
    case WriteTo(n, _) ⇒ nodes.size - (n - 1)
    case _: WriteAll   ⇒ 0
    case _: WriteMajority ⇒
      val N = nodes.size + 1
      val w = N / 2 + 1 // write to at least (N/2+1) nodes
      N - w
    case WriteLocal ⇒
      // FIX: message previously said "ReadLocal" (copy-paste from ReadAggregator)
      throw new IllegalArgumentException("WriteLocal not supported by WriteAggregator")
  }

  val writeMsg = Write(key.id, envelope)

  override def preStart(): Unit = {
    primaryNodes.foreach { replica(_) ! writeMsg }

    if (remaining.size == doneWhenRemainingSize)
      reply(ok = true)
    else if (doneWhenRemainingSize < 0 || remaining.size < doneWhenRemainingSize)
      reply(ok = false)
  }

  def receive = {
    case WriteAck ⇒
      remaining -= sender().path.address
      if (remaining.size == doneWhenRemainingSize)
        reply(ok = true)
    case SendToSecondary ⇒
      secondaryNodes.foreach { replica(_) ! writeMsg }
    case ReceiveTimeout ⇒ reply(ok = false)
  }

  /** Sends the appropriate reply (update vs delete, success vs failure) and stops. */
  def reply(ok: Boolean): Unit = {
    if (ok && envelope.data == DeletedData)
      replyTo.tell(DeleteSuccess(key), context.parent)
    else if (ok)
      replyTo.tell(UpdateSuccess(key, req), context.parent)
    else if (envelope.data == DeletedData)
      replyTo.tell(ReplicationDeleteFailure(key), context.parent)
    else
      replyTo.tell(UpdateTimeout(key, req), context.parent)
    context.stop(self)
  }
}

/**
 * INTERNAL API
 */
private[akka] object ReadAggregator {
  def props(
    key: KeyR,
    consistency: Replicator.ReadConsistency,
    req: Option[Any],
    nodes: Set[Address],
    localValue: Option[Replicator.Internal.DataEnvelope],
    replyTo: ActorRef): Props =
    Props(new ReadAggregator(key, consistency, req, nodes, localValue, replyTo))
      .withDeploy(Deploy.local)

}

/**
 * INTERNAL API
 *
 * Reads from enough nodes to satisfy the requested ReadConsistency, merges the
 * replies, performs read-repair, and then answers `replyTo`.
 */
private[akka] class ReadAggregator(
  key: KeyR,
  consistency: Replicator.ReadConsistency,
  req: Option[Any],
  override val nodes: Set[Address],
  localValue: Option[Replicator.Internal.DataEnvelope],
  replyTo: ActorRef) extends ReadWriteAggregator {

  import Replicator._
  import Replicator.Internal._
  import ReadWriteAggregator._

  override def timeout: FiniteDuration = consistency.timeout

  var result = localValue
  override val doneWhenRemainingSize = consistency match {
    case ReadFrom(n, _) ⇒ nodes.size - (n - 1)
    case _: ReadAll     ⇒ 0
    case _: ReadMajority ⇒
      val N = nodes.size + 1
      val r = N / 2 + 1 // read from at least (N/2+1) nodes
      N - r
    case ReadLocal ⇒
      throw new IllegalArgumentException("ReadLocal not supported by ReadAggregator")
  }

  val readMsg = Read(key.id)

  override def preStart(): Unit = {
    primaryNodes.foreach { replica(_) ! readMsg }

    if (remaining.size == doneWhenRemainingSize)
      reply(ok = true)
    else if (doneWhenRemainingSize < 0 || remaining.size < doneWhenRemainingSize)
      reply(ok = false)
  }

  def receive = {
    case ReadResult(envelope) ⇒
      // merge the incoming value into the accumulated result
      result = (result, envelope) match {
        case (Some(a), Some(b))  ⇒ Some(a.merge(b))
        case (r @ Some(_), None) ⇒ r
        case (None, r @ Some(_)) ⇒ r
        case (None, None)        ⇒ None
      }
      remaining -= sender().path.address
      if (remaining.size == doneWhenRemainingSize)
        reply(ok = true)
    case SendToSecondary ⇒
      secondaryNodes.foreach { replica(_) ! readMsg }
    case ReceiveTimeout ⇒ reply(ok = false)
  }

  def reply(ok: Boolean): Unit =
    (ok, result) match {
      case (true, Some(envelope)) ⇒
        context.parent ! ReadRepair(key.id, envelope)
        // read-repair happens before GetSuccess
        context.become(waitReadRepairAck(envelope))
      case (true, None) ⇒
        replyTo.tell(NotFound(key, req), context.parent)
        context.stop(self)
      case (false, _) ⇒
        replyTo.tell(GetFailure(key, req), context.parent)
        context.stop(self)
    }

  /** Awaits the local read-repair ack before answering the requester. */
  def waitReadRepairAck(envelope: Replicator.Internal.DataEnvelope): Receive = {
    case ReadRepairAck ⇒
      val replyMsg =
        if (envelope.data == DeletedData) DataDeleted(key)
        else GetSuccess(key, req)(envelope.data)
      replyTo.tell(replyMsg, context.parent)
      context.stop(self)
    case _: ReadResult ⇒
      // collect late replies
      remaining -= sender().path.address
    case SendToSecondary ⇒
    case ReceiveTimeout  ⇒
  }
}
package akka.cluster.ddata

import java.util.concurrent.atomic.AtomicLong

import scala.annotation.tailrec
import scala.collection.immutable.TreeMap

import akka.cluster.Cluster
import akka.cluster.UniqueAddress

/**
 * VersionVector module with helper classes and methods.
 */
object VersionVector {

  val empty: VersionVector = new VersionVector(TreeMap.empty[UniqueAddress, Long])

  def apply(): VersionVector = empty

  /**
   * Java API
   */
  def create(): VersionVector = empty

  /** Possible outcomes of comparing two version vectors. */
  sealed trait Ordering
  case object After extends Ordering
  case object Before extends Ordering
  case object Same extends Ordering
  case object Concurrent extends Ordering
  /**
   * Marker to ensure that we do a full order comparison instead of bailing out early.
   */
  private case object FullOrder extends Ordering

  /**
   * Java API: The `VersionVector.After` instance
   */
  def AfterInstance = After

  /**
   * Java API: The `VersionVector.Before` instance
   */
  def BeforeInstance = Before

  /**
   * Java API: The `VersionVector.Same` instance
   */
  def SameInstance = Same

  /**
   * Java API: The `VersionVector.Concurrent` instance
   */
  def ConcurrentInstance = Concurrent

  private object Timestamp {
    final val Zero = 0L
    final val EndMarker = Long.MinValue
    // globally increasing "time" source for increments
    val counter = new AtomicLong(1L)
  }

  /**
   * Marker to signal that we have reached the end of a version vector.
   */
  private val cmpEndMarker = (null, Timestamp.EndMarker)

}

/**
 * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks.
 * {{{
 * Reference:
 * 1) Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
 * 2) Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
 * }}}
 *
 * Based on code from `akka.cluster.VectorClock`.
 *
 * This class is immutable, i.e. "modifying" methods return a new instance.
 */
@SerialVersionUID(1L)
final case class VersionVector private[akka] (
  private[akka] val versions: TreeMap[UniqueAddress, Long])
  extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning {

  type T = VersionVector

  import VersionVector._

  /**
   * Increment the version for the node passed as argument. Returns a new VersionVector.
   */
  def +(node: Cluster): VersionVector = increment(node)

  /**
   * INTERNAL API
   * Increment the version for the node passed as argument. Returns a new VersionVector.
   */
  private[akka] def +(node: UniqueAddress): VersionVector = increment(node)

  /**
   * Increment the version for the node passed as argument. Returns a new VersionVector.
   */
  def increment(node: Cluster): VersionVector = increment(node.selfUniqueAddress)

  /**
   * INTERNAL API
   * Increment the version for the node passed as argument. Returns a new VersionVector.
   */
  private[akka] def increment(node: UniqueAddress): VersionVector =
    copy(versions = versions.updated(node, Timestamp.counter.getAndIncrement()))

  /**
   * Returns true if this and that are concurrent else false.
   */
  def <>(that: VersionVector): Boolean = compareOnlyTo(that, Concurrent) eq Concurrent

  /**
   * Returns true if this is before that else false.
   */
  def <(that: VersionVector): Boolean = compareOnlyTo(that, Before) eq Before

  /**
   * Returns true if this is after that else false.
   */
  def >(that: VersionVector): Boolean = compareOnlyTo(that, After) eq After

  /**
   * Returns true if this VersionVector has the same history as the 'that' VersionVector else false.
   */
  def ==(that: VersionVector): Boolean = compareOnlyTo(that, Same) eq Same

  /**
   * Version vector comparison according to the semantics described by compareTo,
   * with the ability to bail out early if we can't reach the Ordering that we
   * are looking for.
   *
   * The ordering always starts with Same and can then go to Same, Before or After.
   * If we're on After we can only go to After or Concurrent.
   * If we're on Before we can only go to Before or Concurrent.
   * If we go to Concurrent we exit the loop immediately.
   *
   * If you send in the ordering FullOrder, you will get a full comparison.
   */
  private final def compareOnlyTo(that: VersionVector, order: Ordering): Ordering = {
    def nextOrElse[A](iter: Iterator[A], default: A): A = if (iter.hasNext) iter.next() else default

    def compare(i1: Iterator[(UniqueAddress, Long)], i2: Iterator[(UniqueAddress, Long)], requestedOrder: Ordering): Ordering = {
      @tailrec
      def compareNext(nt1: (UniqueAddress, Long), nt2: (UniqueAddress, Long), currentOrder: Ordering): Ordering =
        if ((requestedOrder ne FullOrder) && (currentOrder ne Same) && (currentOrder ne requestedOrder)) currentOrder
        else if ((nt1 eq cmpEndMarker) && (nt2 eq cmpEndMarker)) currentOrder
        // i1 is empty but i2 is not, so i1 can only be Before
        else if (nt1 eq cmpEndMarker) { if (currentOrder eq After) Concurrent else Before }
        // i2 is empty but i1 is not, so i1 can only be After
        else if (nt2 eq cmpEndMarker) { if (currentOrder eq Before) Concurrent else After }
        else {
          // compare the nodes
          val nc = nt1._1 compareTo nt2._1
          if (nc == 0) {
            // both nodes exist; compare the timestamps
            // same timestamp so just continue with the next nodes
            if (nt1._2 == nt2._2) compareNext(nextOrElse(i1, cmpEndMarker), nextOrElse(i2, cmpEndMarker), currentOrder)
            else if (nt1._2 < nt2._2) {
              // t1 is less than t2, so i1 can only be Before
              if (currentOrder eq After) Concurrent
              else compareNext(nextOrElse(i1, cmpEndMarker), nextOrElse(i2, cmpEndMarker), Before)
            } else {
              // t2 is less than t1, so i1 can only be After
              if (currentOrder eq Before) Concurrent
              else compareNext(nextOrElse(i1, cmpEndMarker), nextOrElse(i2, cmpEndMarker), After)
            }
          } else if (nc < 0) {
            // this node only exists in i1 so i1 can only be After
            if (currentOrder eq Before) Concurrent
            else compareNext(nextOrElse(i1, cmpEndMarker), nt2, After)
          } else {
            // this node only exists in i2 so i1 can only be Before
            if (currentOrder eq After) Concurrent
            else compareNext(nt1, nextOrElse(i2, cmpEndMarker), Before)
          }
        }

      compareNext(nextOrElse(i1, cmpEndMarker), nextOrElse(i2, cmpEndMarker), Same)
    }

    if ((this eq that) || (this.versions eq that.versions)) Same
    else compare(this.versions.iterator, that.versions.iterator, if (order eq Concurrent) FullOrder else order)
  }

  /**
   * Compare two version vectors. The outcome will be one of the following:
   *
   * {{{
   * 1. Version 1 is SAME (==) as Version 2 iff for all i c1(i) == c2(i)
   * 2. Version 1 is BEFORE (<) Version 2 iff for all i c1(i) <= c2(i) and there exist a j such that c1(j) < c2(j)
   * 3. Version 1 is AFTER (>) Version 2 iff for all i c1(i) >= c2(i) and there exist a j such that c1(j) > c2(j).
   * 4. Version 1 is CONCURRENT (<>) to Version 2 otherwise.
   * }}}
   */
  def compareTo(that: VersionVector): Ordering =
    compareOnlyTo(that, FullOrder)

  /**
   * Merges this VersionVector with another VersionVector, i.e. the pairwise
   * maximum of the two histories.
   */
  def merge(that: VersionVector): VersionVector = {
    var merged = that.versions
    for ((node, time) ← versions) {
      val existingTime = merged.getOrElse(node, Timestamp.Zero)
      if (time > existingTime)
        merged = merged.updated(node, time)
    }
    VersionVector(merged)
  }

  override def needPruningFrom(removedNode: UniqueAddress): Boolean =
    versions.contains(removedNode)

  override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): VersionVector =
    copy(versions = versions - removedNode) + collapseInto

  override def pruningCleanup(removedNode: UniqueAddress): VersionVector = copy(versions = versions - removedNode)

  override def toString = versions.map { case ((n, t)) ⇒ n + " -> " + t }.mkString("VersionVector(", ", ", ")")
}
+ */ +package akka.cluster.ddata.protobuf + +import java.{ lang ⇒ jl } +import java.util.ArrayList +import java.util.Collections +import java.util.Comparator +import scala.annotation.tailrec +import scala.collection.JavaConverters._ +import scala.collection.breakOut +import akka.actor.ExtendedActorSystem +import akka.cluster.ddata._ +import akka.cluster.ddata.Replicator._ +import akka.cluster.ddata.Replicator.Internal._ +import akka.cluster.ddata.protobuf.msg.{ ReplicatedDataMessages ⇒ rd } +import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages ⇒ dm } +import akka.serialization.SerializerWithStringManifest +import akka.serialization.BaseSerializer +import com.google.protobuf.ByteString +import akka.util.ByteString.UTF_8 + +/** + * Protobuf serializer of ReplicatedData. + */ +class ReplicatedDataSerializer(val system: ExtendedActorSystem) + extends SerializerWithStringManifest with SerializationSupport with BaseSerializer { + + private val DeletedDataManifest = "A" + private val GSetManifest = "B" + private val GSetKeyManifest = "b" + private val ORSetManifest = "C" + private val ORSetKeyManifest = "c" + private val FlagManifest = "D" + private val FlagKeyManifest = "d" + private val LWWRegisterManifest = "E" + private val LWWRegisterKeyManifest = "e" + private val GCounterManifest = "F" + private val GCounterKeyManifest = "f" + private val PNCounterManifest = "G" + private val PNCounterKeyManifest = "g" + private val ORMapManifest = "H" + private val ORMapKeyManifest = "h" + private val LWWMapManifest = "I" + private val LWWMapKeyManifest = "i" + private val PNCounterMapManifest = "J" + private val PNCounterMapKeyManifest = "j" + private val VersionVectorManifest = "L" + + private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( + GSetManifest -> gsetFromBinary, + ORSetManifest -> orsetFromBinary, + FlagManifest -> flagFromBinary, + LWWRegisterManifest -> lwwRegisterFromBinary, + GCounterManifest -> gcounterFromBinary, + 
PNCounterManifest -> pncounterFromBinary, + ORMapManifest -> ormapFromBinary, + LWWMapManifest -> lwwmapFromBinary, + PNCounterMapManifest -> pncountermapFromBinary, + DeletedDataManifest -> (_ ⇒ DeletedData), + VersionVectorManifest -> versionVectorFromBinary, + + GSetKeyManifest -> (bytes ⇒ GSetKey(keyIdFromBinary(bytes))), + ORSetKeyManifest -> (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))), + FlagKeyManifest -> (bytes ⇒ FlagKey(keyIdFromBinary(bytes))), + LWWRegisterKeyManifest -> (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))), + GCounterKeyManifest -> (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))), + PNCounterKeyManifest -> (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))), + ORMapKeyManifest -> (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))), + LWWMapKeyManifest -> (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))), + PNCounterMapKeyManifest -> (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes)))) + + override def manifest(obj: AnyRef): String = obj match { + case _: ORSet[_] ⇒ ORSetManifest + case _: GSet[_] ⇒ GSetManifest + case _: GCounter ⇒ GCounterManifest + case _: PNCounter ⇒ PNCounterManifest + case _: Flag ⇒ FlagManifest + case _: LWWRegister[_] ⇒ LWWRegisterManifest + case _: ORMap[_] ⇒ ORMapManifest + case _: LWWMap[_] ⇒ LWWMapManifest + case _: PNCounterMap ⇒ PNCounterMapManifest + case DeletedData ⇒ DeletedDataManifest + case _: VersionVector ⇒ VersionVectorManifest + + case _: ORSetKey[_] ⇒ ORSetKeyManifest + case _: GSetKey[_] ⇒ GSetKeyManifest + case _: GCounterKey ⇒ GCounterKeyManifest + case _: PNCounterKey ⇒ PNCounterKeyManifest + case _: FlagKey ⇒ FlagKeyManifest + case _: LWWRegisterKey[_] ⇒ LWWRegisterKeyManifest + case _: ORMapKey[_] ⇒ ORMapKeyManifest + case _: LWWMapKey[_] ⇒ LWWMapKeyManifest + case _: PNCounterMapKey ⇒ PNCounterMapKeyManifest + + case _ ⇒ + throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]") + } + + def toBinary(obj: AnyRef): Array[Byte] = obj match { + case m: ORSet[_] ⇒ 
compress(orsetToProto(m)) + case m: GSet[_] ⇒ gsetToProto(m).toByteArray + case m: GCounter ⇒ gcounterToProto(m).toByteArray + case m: PNCounter ⇒ pncounterToProto(m).toByteArray + case m: Flag ⇒ flagToProto(m).toByteArray + case m: LWWRegister[_] ⇒ lwwRegisterToProto(m).toByteArray + case m: ORMap[_] ⇒ compress(ormapToProto(m)) + case m: LWWMap[_] ⇒ compress(lwwmapToProto(m)) + case m: PNCounterMap ⇒ compress(pncountermapToProto(m)) + case DeletedData ⇒ dm.Empty.getDefaultInstance.toByteArray + case m: VersionVector ⇒ versionVectorToProto(m).toByteArray + case Key(id) ⇒ keyIdToBinary(id) + case _ ⇒ + throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]") + } + + override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = + fromBinaryMap.get(manifest) match { + case Some(f) ⇒ f(bytes) + case None ⇒ throw new IllegalArgumentException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") + } + + def gsetToProto(gset: GSet[_]): rd.GSet = { + val b = rd.GSet.newBuilder() + // using java collections and sorting for performance (avoid conversions) + val stringElements = new ArrayList[String] + val intElements = new ArrayList[Integer] + val longElements = new ArrayList[jl.Long] + val otherElements = new ArrayList[dm.OtherMessage] + gset.elements.foreach { + case s: String ⇒ stringElements.add(s) + case i: Int ⇒ intElements.add(i) + case l: Long ⇒ longElements.add(l) + case other ⇒ otherElements.add(otherMessageToProto(other)) + } + if (!stringElements.isEmpty) { + Collections.sort(stringElements) + b.addAllStringElements(stringElements) + } + if (!intElements.isEmpty) { + Collections.sort(intElements) + b.addAllIntElements(intElements) + } + if (!longElements.isEmpty) { + Collections.sort(longElements) + b.addAllLongElements(longElements) + } + if (!otherElements.isEmpty) { + Collections.sort(otherElements, OtherMessageComparator) + 
b.addAllOtherElements(otherElements) + } + b.build() + } + + def gsetFromBinary(bytes: Array[Byte]): GSet[_] = + gsetFromProto(rd.GSet.parseFrom(bytes)) + + def gsetFromProto(gset: rd.GSet): GSet[Any] = + GSet(gset.getStringElementsList.iterator.asScala.toSet ++ + gset.getIntElementsList.iterator.asScala ++ + gset.getLongElementsList.iterator.asScala ++ + gset.getOtherElementsList.iterator.asScala.map(otherMessageFromProto)) + + def orsetToProto(orset: ORSet[_]): rd.ORSet = + orsetToProtoImpl(orset.asInstanceOf[ORSet[Any]]) + + private def orsetToProtoImpl(orset: ORSet[Any]): rd.ORSet = { + val b = rd.ORSet.newBuilder().setVvector(versionVectorToProto(orset.vvector)) + // using java collections and sorting for performance (avoid conversions) + val stringElements = new ArrayList[String] + val intElements = new ArrayList[Integer] + val longElements = new ArrayList[jl.Long] + val otherElements = new ArrayList[dm.OtherMessage] + var otherElementsMap = Map.empty[dm.OtherMessage, Any] + orset.elementsMap.keysIterator.foreach { + case s: String ⇒ stringElements.add(s) + case i: Int ⇒ intElements.add(i) + case l: Long ⇒ longElements.add(l) + case other ⇒ + val enclosedMsg = otherMessageToProto(other) + otherElements.add(enclosedMsg) + // need the mapping back to the `other` when adding dots + otherElementsMap = otherElementsMap.updated(enclosedMsg, other) + } + + def addDots(elements: ArrayList[_]): Unit = { + // add corresponding dots in same order + val iter = elements.iterator + while (iter.hasNext) { + val element = iter.next() match { + case enclosedMsg: dm.OtherMessage ⇒ otherElementsMap(enclosedMsg) + case e ⇒ e + } + b.addDots(versionVectorToProto(orset.elementsMap(element))) + } + } + + if (!stringElements.isEmpty) { + Collections.sort(stringElements) + b.addAllStringElements(stringElements) + addDots(stringElements) + } + if (!intElements.isEmpty) { + Collections.sort(intElements) + b.addAllIntElements(intElements) + addDots(intElements) + } + if 
(!longElements.isEmpty) { + Collections.sort(longElements) + b.addAllLongElements(longElements) + addDots(longElements) + } + if (!otherElements.isEmpty) { + Collections.sort(otherElements, OtherMessageComparator) + b.addAllOtherElements(otherElements) + addDots(otherElements) + } + + b.build() + } + + def orsetFromBinary(bytes: Array[Byte]): ORSet[Any] = + orsetFromProto(rd.ORSet.parseFrom(decompress(bytes))) + + def orsetFromProto(orset: rd.ORSet): ORSet[Any] = { + val elements: Iterator[Any] = + (orset.getStringElementsList.iterator.asScala ++ + orset.getIntElementsList.iterator.asScala ++ + orset.getLongElementsList.iterator.asScala ++ + orset.getOtherElementsList.iterator.asScala.map(otherMessageFromProto)) + + val dots = orset.getDotsList.asScala.map(versionVectorFromProto).iterator + val elementsMap = elements.zip(dots).toMap + + new ORSet(elementsMap, vvector = versionVectorFromProto(orset.getVvector)) + } + + def flagToProto(flag: Flag): rd.Flag = + rd.Flag.newBuilder().setEnabled(flag.enabled).build() + + def flagFromBinary(bytes: Array[Byte]): Flag = + flagFromProto(rd.Flag.parseFrom(bytes)) + + def flagFromProto(flag: rd.Flag): Flag = + Flag(flag.getEnabled) + + def lwwRegisterToProto(lwwRegister: LWWRegister[_]): rd.LWWRegister = + rd.LWWRegister.newBuilder(). + setTimestamp(lwwRegister.timestamp). + setNode(uniqueAddressToProto(lwwRegister.node)). + setState(otherMessageToProto(lwwRegister.value)). 
+ build() + + def lwwRegisterFromBinary(bytes: Array[Byte]): LWWRegister[Any] = + lwwRegisterFromProto(rd.LWWRegister.parseFrom(bytes)) + + def lwwRegisterFromProto(lwwRegister: rd.LWWRegister): LWWRegister[Any] = + new LWWRegister( + uniqueAddressFromProto(lwwRegister.getNode), + otherMessageFromProto(lwwRegister.getState), + lwwRegister.getTimestamp) + + def gcounterToProto(gcounter: GCounter): rd.GCounter = { + val b = rd.GCounter.newBuilder() + gcounter.state.toVector.sortBy { case (address, _) ⇒ address }.foreach { + case (address, value) ⇒ b.addEntries(rd.GCounter.Entry.newBuilder(). + setNode(uniqueAddressToProto(address)).setValue(ByteString.copyFrom(value.toByteArray))) + } + b.build() + } + + def gcounterFromBinary(bytes: Array[Byte]): GCounter = + gcounterFromProto(rd.GCounter.parseFrom(bytes)) + + def gcounterFromProto(gcounter: rd.GCounter): GCounter = { + new GCounter(state = gcounter.getEntriesList.asScala.map(entry ⇒ + uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray))(breakOut)) + } + + def pncounterToProto(pncounter: PNCounter): rd.PNCounter = + rd.PNCounter.newBuilder(). + setIncrements(gcounterToProto(pncounter.increments)). + setDecrements(gcounterToProto(pncounter.decrements)). + build() + + def pncounterFromBinary(bytes: Array[Byte]): PNCounter = + pncounterFromProto(rd.PNCounter.parseFrom(bytes)) + + def pncounterFromProto(pncounter: rd.PNCounter): PNCounter = { + new PNCounter( + increments = gcounterFromProto(pncounter.getIncrements), + decrements = gcounterFromProto(pncounter.getDecrements)) + } + + def versionVectorToProto(versionVector: VersionVector): rd.VersionVector = { + val b = rd.VersionVector.newBuilder() + versionVector.versions.foreach { + case (node, value) ⇒ b.addEntries(rd.VersionVector.Entry.newBuilder(). 
+ setNode(uniqueAddressToProto(node)).setVersion(value)) + } + b.build() + } + + def versionVectorFromBinary(bytes: Array[Byte]): VersionVector = + versionVectorFromProto(rd.VersionVector.parseFrom(bytes)) + + def versionVectorFromProto(versionVector: rd.VersionVector): VersionVector = { + VersionVector(versions = versionVector.getEntriesList.asScala.map(entry ⇒ + uniqueAddressFromProto(entry.getNode) -> entry.getVersion)(breakOut)) + } + + def ormapToProto(ormap: ORMap[_]): rd.ORMap = { + val b = rd.ORMap.newBuilder().setKeys(orsetToProto(ormap.keys)) + ormap.entries.toVector.sortBy { case (key, _) ⇒ key }.foreach { + case (key, value) ⇒ b.addEntries(rd.ORMap.Entry.newBuilder(). + setKey(key).setValue(otherMessageToProto(value))) + } + b.build() + } + + def ormapFromBinary(bytes: Array[Byte]): ORMap[ReplicatedData] = + ormapFromProto(rd.ORMap.parseFrom(decompress(bytes))) + + def ormapFromProto(ormap: rd.ORMap): ORMap[ReplicatedData] = { + val entries = ormap.getEntriesList.asScala.map(entry ⇒ + entry.getKey -> otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap + new ORMap( + keys = orsetFromProto(ormap.getKeys).asInstanceOf[ORSet[String]], + entries) + } + + def lwwmapToProto(lwwmap: LWWMap[_]): rd.LWWMap = { + val b = rd.LWWMap.newBuilder().setKeys(orsetToProto(lwwmap.underlying.keys)) + lwwmap.underlying.entries.toVector.sortBy { case (key, _) ⇒ key }.foreach { + case (key, value) ⇒ b.addEntries(rd.LWWMap.Entry.newBuilder(). 
+ setKey(key).setValue(lwwRegisterToProto(value))) + } + b.build() + } + + def lwwmapFromBinary(bytes: Array[Byte]): LWWMap[Any] = + lwwmapFromProto(rd.LWWMap.parseFrom(decompress(bytes))) + + def lwwmapFromProto(lwwmap: rd.LWWMap): LWWMap[Any] = { + val entries = lwwmap.getEntriesList.asScala.map(entry ⇒ + entry.getKey -> lwwRegisterFromProto(entry.getValue)).toMap + new LWWMap(new ORMap( + keys = orsetFromProto(lwwmap.getKeys).asInstanceOf[ORSet[String]], + entries)) + } + + def pncountermapToProto(pncountermap: PNCounterMap): rd.PNCounterMap = { + val b = rd.PNCounterMap.newBuilder().setKeys(orsetToProto(pncountermap.underlying.keys)) + pncountermap.underlying.entries.toVector.sortBy { case (key, _) ⇒ key }.foreach { + case (key, value: PNCounter) ⇒ b.addEntries(rd.PNCounterMap.Entry.newBuilder(). + setKey(key).setValue(pncounterToProto(value))) + } + b.build() + } + + def pncountermapFromBinary(bytes: Array[Byte]): PNCounterMap = + pncountermapFromProto(rd.PNCounterMap.parseFrom(decompress(bytes))) + + def pncountermapFromProto(pncountermap: rd.PNCounterMap): PNCounterMap = { + val entries = pncountermap.getEntriesList.asScala.map(entry ⇒ + entry.getKey -> pncounterFromProto(entry.getValue)).toMap + new PNCounterMap(new ORMap( + keys = orsetFromProto(pncountermap.getKeys).asInstanceOf[ORSet[String]], + entries)) + } + + def keyIdToBinary(id: String): Array[Byte] = + id.getBytes(UTF_8) + + def keyIdFromBinary(bytes: Array[Byte]): String = + new String(bytes, UTF_8) + +} + +object OtherMessageComparator extends Comparator[dm.OtherMessage] { + override def compare(a: dm.OtherMessage, b: dm.OtherMessage): Int = { + val aByteString = a.getEnclosedMessage + val bByteString = b.getEnclosedMessage + val aSize = aByteString.size + val bSize = bByteString.size + if (aSize == bSize) { + val aIter = aByteString.iterator + val bIter = bByteString.iterator + @tailrec def findDiff(): Int = { + if (aIter.hasNext) { + val aByte = aIter.nextByte() + val bByte = bIter.nextByte() 
+ if (aByte < bByte) -1 + else if (aByte > bByte) 1 + else findDiff() + } else 0 + } + findDiff() + } else if (aSize < bSize) -1 + else 1 + } +} diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala new file mode 100644 index 0000000000..f58e065784 --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala @@ -0,0 +1,326 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata.protobuf + +import java.util.concurrent.TimeUnit +import scala.collection.JavaConverters._ +import scala.collection.breakOut +import scala.concurrent.duration.Duration +import akka.actor.ExtendedActorSystem +import akka.cluster.Member +import akka.cluster.UniqueAddress +import akka.cluster.ddata.PruningState +import akka.cluster.ddata.ReplicatedData +import akka.cluster.ddata.Replicator._ +import akka.cluster.ddata.Replicator.Internal._ +import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages ⇒ dm } +import akka.serialization.Serialization +import akka.serialization.SerializerWithStringManifest +import akka.serialization.BaseSerializer +import akka.util.{ ByteString ⇒ AkkaByteString } +import com.google.protobuf.ByteString +import akka.cluster.ddata.Key.KeyR + +/** + * Protobuf serializer of ReplicatorMessage messages. 
+ */ +class ReplicatorMessageSerializer(val system: ExtendedActorSystem) + extends SerializerWithStringManifest with SerializationSupport with BaseSerializer { + + val GetManifest = "A" + val GetSuccessManifest = "B" + val NotFoundManifest = "C" + val GetFailureManifest = "D" + val SubscribeManifest = "E" + val UnsubscribeManifest = "F" + val ChangedManifest = "G" + val DataEnvelopeManifest = "H" + val WriteManifest = "I" + val WriteAckManifest = "J" + val ReadManifest = "K" + val ReadResultManifest = "L" + val StatusManifest = "M" + val GossipManifest = "N" + + private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( + GetManifest -> getFromBinary, + GetSuccessManifest -> getSuccessFromBinary, + NotFoundManifest -> notFoundFromBinary, + GetFailureManifest -> getFailureFromBinary, + SubscribeManifest -> subscribeFromBinary, + UnsubscribeManifest -> unsubscribeFromBinary, + ChangedManifest -> changedFromBinary, + DataEnvelopeManifest -> dataEnvelopeFromBinary, + WriteManifest -> writeFromBinary, + WriteAckManifest -> (_ ⇒ WriteAck), + ReadManifest -> readFromBinary, + ReadResultManifest -> readResultFromBinary, + StatusManifest -> statusFromBinary, + GossipManifest -> gossipFromBinary) + + override def manifest(obj: AnyRef): String = obj match { + case _: DataEnvelope ⇒ DataEnvelopeManifest + case _: Write ⇒ WriteManifest + case WriteAck ⇒ WriteAckManifest + case _: Read ⇒ ReadManifest + case _: ReadResult ⇒ ReadResultManifest + case _: Status ⇒ StatusManifest + case _: Get[_] ⇒ GetManifest + case _: GetSuccess[_] ⇒ GetSuccessManifest + case _: Changed[_] ⇒ ChangedManifest + case _: NotFound[_] ⇒ NotFoundManifest + case _: GetFailure[_] ⇒ GetFailureManifest + case _: Subscribe[_] ⇒ SubscribeManifest + case _: Unsubscribe[_] ⇒ UnsubscribeManifest + case _: Gossip ⇒ GossipManifest + case _ ⇒ + throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]") + } + + def toBinary(obj: AnyRef): 
Array[Byte] = obj match {
+    case m: DataEnvelope   ⇒ dataEnvelopeToProto(m).toByteArray
+    case m: Write          ⇒ writeToProto(m).toByteArray
+    case WriteAck          ⇒ dm.Empty.getDefaultInstance.toByteArray
+    case m: Read           ⇒ readToProto(m).toByteArray
+    case m: ReadResult     ⇒ readResultToProto(m).toByteArray
+    case m: Status         ⇒ statusToProto(m).toByteArray
+    case m: Get[_]         ⇒ getToProto(m).toByteArray
+    case m: GetSuccess[_]  ⇒ getSuccessToProto(m).toByteArray
+    case m: Changed[_]     ⇒ changedToProto(m).toByteArray
+    case m: NotFound[_]    ⇒ notFoundToProto(m).toByteArray
+    case m: GetFailure[_]  ⇒ getFailureToProto(m).toByteArray
+    case m: Subscribe[_]   ⇒ subscribeToProto(m).toByteArray
+    case m: Unsubscribe[_] ⇒ unsubscribeToProto(m).toByteArray
+    case m: Gossip         ⇒ compress(gossipToProto(m))
+    case _ ⇒
+      throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
+  }
+
+  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
+    fromBinaryMap.get(manifest) match {
+      case Some(f) ⇒ f(bytes)
+      case None ⇒ throw new IllegalArgumentException(
+        s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]")
+    }
+
+  private def statusToProto(status: Status): dm.Status = {
+    val b = dm.Status.newBuilder()
+    b.setChunk(status.chunk).setTotChunks(status.totChunks)
+    // iteration is for the builder side effect only; `foreach` returns Unit,
+    // so there is nothing useful to bind to a val here
+    status.digests.foreach {
+      case (key, digest) ⇒
+        b.addEntries(dm.Status.Entry.newBuilder().
+          setKey(key).
+          setDigest(ByteString.copyFrom(digest.toArray)))
+    }
+    b.build()
+  }
+
+  private def statusFromBinary(bytes: Array[Byte]): Status = {
+    val status = dm.Status.parseFrom(bytes)
+    Status(status.getEntriesList.asScala.map(e ⇒
+      e.getKey -> AkkaByteString(e.getDigest.toByteArray()))(breakOut),
+      status.getChunk, status.getTotChunks)
+  }
+
+  private def gossipToProto(gossip: Gossip): dm.Gossip = {
+    val b = dm.Gossip.newBuilder().setSendBack(gossip.sendBack)
+    // iteration is for the builder side effect only; `foreach` returns Unit,
+    // so there is nothing useful to bind to a val here
+    gossip.updatedData.foreach {
+      case (key, data) ⇒
+        b.addEntries(dm.Gossip.Entry.newBuilder().
+          setKey(key).
+          setEnvelope(dataEnvelopeToProto(data)))
+    }
+    b.build()
+  }
+
+  private def gossipFromBinary(bytes: Array[Byte]): Gossip = {
+    val gossip = dm.Gossip.parseFrom(decompress(bytes))
+    Gossip(gossip.getEntriesList.asScala.map(e ⇒
+      e.getKey -> dataEnvelopeFromProto(e.getEnvelope))(breakOut),
+      sendBack = gossip.getSendBack)
+  }
+
+  private def getToProto(get: Get[_]): dm.Get = {
+    // wire encoding of the read consistency: 1 = local, 0 = majority,
+    // -1 = all, n > 1 = read from n replicas (mirrored in getFromBinary)
+    val consistencyValue = get.consistency match {
+      case ReadLocal       ⇒ 1
+      case ReadFrom(n, _)  ⇒ n
+      case _: ReadMajority ⇒ 0
+      case _: ReadAll      ⇒ -1
+    }
+
+    val b = dm.Get.newBuilder().
+      setKey(otherMessageToProto(get.key)).
+      setConsistency(consistencyValue).
+      setTimeout(get.consistency.timeout.toMillis.toInt)
+
+    get.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o)))
+    b.build()
+  }
+
+  private def getFromBinary(bytes: Array[Byte]): Get[_] = {
+    val get = dm.Get.parseFrom(bytes)
+    val key = otherMessageFromProto(get.getKey).asInstanceOf[KeyR]
+    val request = if (get.hasRequest()) Some(otherMessageFromProto(get.getRequest)) else None
+    val timeout = Duration(get.getTimeout, TimeUnit.MILLISECONDS)
+    val consistency = get.getConsistency match {
+      case 0  ⇒ ReadMajority(timeout)
+      case -1 ⇒ ReadAll(timeout)
+      case 1  ⇒ ReadLocal
+      case n  ⇒ ReadFrom(n, timeout)
+    }
+    Get(key, consistency, request)
+  }
+
+  private def getSuccessToProto(getSuccess: GetSuccess[_]): dm.GetSuccess = {
+    val b = dm.GetSuccess.newBuilder().
+ setKey(otherMessageToProto(getSuccess.key)). + setData(otherMessageToProto(getSuccess.dataValue)) + + getSuccess.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o))) + b.build() + } + + private def getSuccessFromBinary(bytes: Array[Byte]): GetSuccess[_] = { + val getSuccess = dm.GetSuccess.parseFrom(bytes) + val key = otherMessageFromProto(getSuccess.getKey).asInstanceOf[KeyR] + val request = if (getSuccess.hasRequest()) Some(otherMessageFromProto(getSuccess.getRequest)) else None + val data = otherMessageFromProto(getSuccess.getData).asInstanceOf[ReplicatedData] + GetSuccess(key, request)(data) + } + + private def notFoundToProto(notFound: NotFound[_]): dm.NotFound = { + val b = dm.NotFound.newBuilder().setKey(otherMessageToProto(notFound.key)) + notFound.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o))) + b.build() + } + + private def notFoundFromBinary(bytes: Array[Byte]): NotFound[_] = { + val notFound = dm.NotFound.parseFrom(bytes) + val request = if (notFound.hasRequest()) Some(otherMessageFromProto(notFound.getRequest)) else None + val key = otherMessageFromProto(notFound.getKey).asInstanceOf[KeyR] + NotFound(key, request) + } + + private def getFailureToProto(getFailure: GetFailure[_]): dm.GetFailure = { + val b = dm.GetFailure.newBuilder().setKey(otherMessageToProto(getFailure.key)) + getFailure.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o))) + b.build() + } + + private def getFailureFromBinary(bytes: Array[Byte]): GetFailure[_] = { + val getFailure = dm.GetFailure.parseFrom(bytes) + val request = if (getFailure.hasRequest()) Some(otherMessageFromProto(getFailure.getRequest)) else None + val key = otherMessageFromProto(getFailure.getKey).asInstanceOf[KeyR] + GetFailure(key, request) + } + + private def subscribeToProto(subscribe: Subscribe[_]): dm.Subscribe = + dm.Subscribe.newBuilder(). + setKey(otherMessageToProto(subscribe.key)). + setRef(Serialization.serializedActorPath(subscribe.subscriber)). 
+ build() + + private def subscribeFromBinary(bytes: Array[Byte]): Subscribe[_] = { + val subscribe = dm.Subscribe.parseFrom(bytes) + val key = otherMessageFromProto(subscribe.getKey).asInstanceOf[KeyR] + Subscribe(key, resolveActorRef(subscribe.getRef)) + } + + private def unsubscribeToProto(unsubscribe: Unsubscribe[_]): dm.Unsubscribe = + dm.Unsubscribe.newBuilder(). + setKey(otherMessageToProto(unsubscribe.key)). + setRef(Serialization.serializedActorPath(unsubscribe.subscriber)). + build() + + private def unsubscribeFromBinary(bytes: Array[Byte]): Unsubscribe[_] = { + val unsubscribe = dm.Unsubscribe.parseFrom(bytes) + val key = otherMessageFromProto(unsubscribe.getKey).asInstanceOf[KeyR] + Unsubscribe(key, resolveActorRef(unsubscribe.getRef)) + } + + private def changedToProto(changed: Changed[_]): dm.Changed = + dm.Changed.newBuilder(). + setKey(otherMessageToProto(changed.key)). + setData(otherMessageToProto(changed.dataValue)). + build() + + private def changedFromBinary(bytes: Array[Byte]): Changed[_] = { + val changed = dm.Changed.parseFrom(bytes) + val data = otherMessageFromProto(changed.getData).asInstanceOf[ReplicatedData] + val key = otherMessageFromProto(changed.getKey).asInstanceOf[KeyR] + Changed(key)(data) + } + + private def dataEnvelopeToProto(dataEnvelope: DataEnvelope): dm.DataEnvelope = { + val dataEnvelopeBuilder = dm.DataEnvelope.newBuilder(). + setData(otherMessageToProto(dataEnvelope.data)) + dataEnvelope.pruning.foreach { + case (removedAddress, state) ⇒ + val b = dm.DataEnvelope.PruningEntry.newBuilder(). + setRemovedAddress(uniqueAddressToProto(removedAddress)). 
+ setOwnerAddress(uniqueAddressToProto(state.owner)) + state.phase match { + case PruningState.PruningInitialized(seen) ⇒ + seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a ⇒ b.addSeen(a) } + b.setPerformed(false) + case PruningState.PruningPerformed ⇒ + b.setPerformed(true) + } + dataEnvelopeBuilder.addPruning(b) + } + dataEnvelopeBuilder.build() + } + + private def dataEnvelopeFromBinary(bytes: Array[Byte]): DataEnvelope = + dataEnvelopeFromProto(dm.DataEnvelope.parseFrom(bytes)) + + private def dataEnvelopeFromProto(dataEnvelope: dm.DataEnvelope): DataEnvelope = { + val pruning: Map[UniqueAddress, PruningState] = + dataEnvelope.getPruningList.asScala.map { pruningEntry ⇒ + val phase = + if (pruningEntry.getPerformed) PruningState.PruningPerformed + else PruningState.PruningInitialized(pruningEntry.getSeenList.asScala.map(addressFromProto)(breakOut)) + val state = PruningState(uniqueAddressFromProto(pruningEntry.getOwnerAddress), phase) + val removed = uniqueAddressFromProto(pruningEntry.getRemovedAddress) + removed -> state + }(breakOut) + val data = otherMessageFromProto(dataEnvelope.getData).asInstanceOf[ReplicatedData] + DataEnvelope(data, pruning) + } + + private def writeToProto(write: Write): dm.Write = + dm.Write.newBuilder(). + setKey(write.key). + setEnvelope(dataEnvelopeToProto(write.envelope)). 
+ build() + + private def writeFromBinary(bytes: Array[Byte]): Write = { + val write = dm.Write.parseFrom(bytes) + Write(write.getKey, dataEnvelopeFromProto(write.getEnvelope)) + } + + private def readToProto(read: Read): dm.Read = + dm.Read.newBuilder().setKey(read.key).build() + + private def readFromBinary(bytes: Array[Byte]): Read = + Read(dm.Read.parseFrom(bytes).getKey) + + private def readResultToProto(readResult: ReadResult): dm.ReadResult = { + val b = dm.ReadResult.newBuilder() + readResult.envelope match { + case Some(d) ⇒ b.setEnvelope(dataEnvelopeToProto(d)) + case None ⇒ + } + b.build() + } + + private def readResultFromBinary(bytes: Array[Byte]): ReadResult = { + val readResult = dm.ReadResult.parseFrom(bytes) + val envelope = + if (readResult.hasEnvelope) Some(dataEnvelopeFromProto(readResult.getEnvelope)) + else None + ReadResult(envelope) + } + +} diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala new file mode 100644 index 0000000000..bcd92b7d2e --- /dev/null +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala @@ -0,0 +1,144 @@ +/** + * Copyright (C) 2014-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata.protobuf + +import java.io.ByteArrayInputStream +import java.io.ByteArrayOutputStream +import java.util.zip.GZIPInputStream +import java.util.zip.GZIPOutputStream +import scala.annotation.tailrec +import akka.actor.ActorRef +import akka.actor.Address +import akka.actor.ExtendedActorSystem +import akka.cluster.UniqueAddress +import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages ⇒ dm } +import akka.serialization.JSerializer +import akka.serialization.Serialization +import akka.serialization.SerializationExtension +import com.google.protobuf.ByteString +import com.google.protobuf.MessageLite +import akka.serialization.SerializerWithStringManifest + +/** + * Some useful serialization helper methods. + */ +trait SerializationSupport { + + private final val BufferSize = 1024 * 4 + + def system: ExtendedActorSystem + + @volatile + private var ser: Serialization = _ + def serialization: Serialization = { + if (ser == null) ser = SerializationExtension(system) + ser + } + + @volatile + private var protocol: String = _ + def addressProtocol: String = { + if (protocol == null) protocol = system.provider.getDefaultAddress.protocol + protocol + } + + @volatile + private var transportInfo: Serialization.Information = _ + def transportInformation: Serialization.Information = { + if (transportInfo == null) { + val address = system.provider.getDefaultAddress + transportInfo = Serialization.Information(address, system) + } + transportInfo + } + + def compress(msg: MessageLite): Array[Byte] = { + val bos = new ByteArrayOutputStream(BufferSize) + val zip = new GZIPOutputStream(bos) + msg.writeTo(zip) + zip.close() + bos.toByteArray + } + + def decompress(bytes: Array[Byte]): Array[Byte] = { + val in = new GZIPInputStream(new ByteArrayInputStream(bytes)) + val out = new ByteArrayOutputStream() + val buffer = new Array[Byte](BufferSize) + + @tailrec def readChunk(): Unit = in.read(buffer) match { + case -1 ⇒ () + case n ⇒ + out.write(buffer, 0, n) 
+ readChunk() + } + + readChunk() + out.toByteArray + } + + def addressToProto(address: Address): dm.Address.Builder = address match { + case Address(_, _, Some(host), Some(port)) ⇒ + dm.Address.newBuilder().setHostname(host).setPort(port) + case _ ⇒ throw new IllegalArgumentException(s"Address [${address}] could not be serialized: host or port missing.") + } + + def addressFromProto(address: dm.Address): Address = + Address(addressProtocol, system.name, address.getHostname, address.getPort) + + def uniqueAddressToProto(uniqueAddress: UniqueAddress): dm.UniqueAddress.Builder = + dm.UniqueAddress.newBuilder().setAddress(addressToProto(uniqueAddress.address)).setUid(uniqueAddress.uid) + + def uniqueAddressFromProto(uniqueAddress: dm.UniqueAddress): UniqueAddress = + UniqueAddress(addressFromProto(uniqueAddress.getAddress), uniqueAddress.getUid) + + def resolveActorRef(path: String): ActorRef = + system.provider.resolveActorRef(path) + + def otherMessageToProto(msg: Any): dm.OtherMessage = { + def buildOther(): dm.OtherMessage = { + val m = msg.asInstanceOf[AnyRef] + val msgSerializer = serialization.findSerializerFor(m) + val builder = dm.OtherMessage.newBuilder(). + setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(m))) + .setSerializerId(msgSerializer.identifier) + + msgSerializer match { + case ser2: SerializerWithStringManifest ⇒ + val manifest = ser2.manifest(m) + if (manifest != "") + builder.setMessageManifest(ByteString.copyFromUtf8(manifest)) + case _ ⇒ + if (msgSerializer.includeManifest) + builder.setMessageManifest(ByteString.copyFromUtf8(m.getClass.getName)) + } + + builder.build() + } + + // Serialize actor references with full address information (defaultAddress). + // When sending remote messages currentTransportInformation is already set, + // but when serializing for digests it must be set here. 
+ if (Serialization.currentTransportInformation.value == null) + Serialization.currentTransportInformation.withValue(transportInformation) { buildOther() } + else + buildOther() + } + + def otherMessageFromBinary(bytes: Array[Byte]): AnyRef = + otherMessageFromProto(dm.OtherMessage.parseFrom(bytes)) + + def otherMessageFromProto(other: dm.OtherMessage): AnyRef = { + val manifest = if (other.hasMessageManifest) other.getMessageManifest.toStringUtf8 else "" + serialization.deserialize( + other.getEnclosedMessage.toByteArray, + other.getSerializerId, + manifest).get + } + +} + +/** + * Java API + */ +abstract class AbstractSerializationSupport extends JSerializer with SerializationSupport diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala new file mode 100644 index 0000000000..c66e88389d --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala @@ -0,0 +1,283 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata + +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom + +import akka.cluster.Cluster +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.remote.transport.ThrottlerTransportAdapter.Direction +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +object JepsenInspiredInsertSpec extends MultiNodeConfig { + val controller = role("controller") + val n1 = role("n1") + val n2 = role("n2") + val n3 = role("n3") + val n4 = role("n4") + val n5 = role("n5") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters = off + akka.log-dead-letters-during-shutdown = off + akka.remote.log-remote-lifecycle-events = ERROR + akka.testconductor.barrier-timeout = 60 s + """)) + + testTransport(on = true) + +} + +class JepsenInspiredInsertSpecMultiJvmNode1 extends JepsenInspiredInsertSpec +class JepsenInspiredInsertSpecMultiJvmNode2 extends JepsenInspiredInsertSpec +class JepsenInspiredInsertSpecMultiJvmNode3 extends JepsenInspiredInsertSpec +class JepsenInspiredInsertSpecMultiJvmNode4 extends JepsenInspiredInsertSpec +class JepsenInspiredInsertSpecMultiJvmNode5 extends JepsenInspiredInsertSpec +class JepsenInspiredInsertSpecMultiJvmNode6 extends JepsenInspiredInsertSpec + +class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) with STMultiNodeSpec with ImplicitSender { + import JepsenInspiredInsertSpec._ + import Replicator._ + + override def initialParticipants = roles.size + + implicit val cluster = Cluster(system) + val replicator = DistributedData(system).replicator + val nodes = roles.drop(1) // controller not part of active nodes + val nodeCount = nodes.size + val timeout = 3.seconds.dilated + val delayMillis = 0 + val totalCount = 200 + // val delayMillis = 20 + // val totalCount = 2000 + 
val expectedData = (0 until totalCount).toSet
+  val data: Map[RoleName, Seq[Int]] = {
+    val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i -> n }.toMap
+    (0 until totalCount).groupBy(i ⇒ nodeIndex(i % nodeCount))
+  }
+  lazy val myData: Seq[Int] = data(myself)
+
+  def sleepDelay(): Unit =
+    if (delayMillis != 0) {
+      val rndDelay = ThreadLocalRandom.current().nextInt(delayMillis)
+      // sleep the randomly drawn delay (was Thread.sleep(delayMillis),
+      // which always slept the fixed maximum and ignored the draw)
+      if (rndDelay != 0) Thread.sleep(rndDelay)
+    }
+
+  def sleepBeforePartition(): Unit = {
+    if (delayMillis != 0)
+      Thread.sleep(delayMillis * totalCount / nodeCount / 10)
+  }
+
+  def sleepDuringPartition(): Unit =
+    Thread.sleep(math.max(5000, delayMillis * totalCount / nodeCount / 2))
+
+  def join(from: RoleName, to: RoleName): Unit = {
+    runOn(from) {
+      cluster join node(to).address
+    }
+    enterBarrier(from.name + "-joined")
+  }
+
+  "Insert from 5 nodes" must {
+
+    "setup cluster" in {
+      runOn(nodes: _*) {
+        nodes.foreach { join(_, n1) }
+
+        within(10.seconds) {
+          awaitAssert {
+            replicator ! GetReplicaCount
+            expectMsg(ReplicaCount(nodes.size))
+          }
+        }
+      }
+
+      runOn(controller) {
+        nodes.foreach { n ⇒ enterBarrier(n.name + "-joined") }
+      }
+
+      enterBarrier("after-setup")
+    }
+  }
+
+  "replicate values when all nodes connected" in {
+    val key = ORSetKey[Int]("A")
+    runOn(nodes: _*) {
+      val writeProbe = TestProbe()
+      val writeAcks = myData.map { i ⇒
+        sleepDelay()
+        replicator.tell(Update(key, ORSet(), WriteLocal, Some(i))(_ + i), writeProbe.ref)
+        writeProbe.receiveOne(3.seconds)
+      }
+      val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] ⇒ success }
+      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] ⇒ fail }
+      successWriteAcks.map(_.request.get).toSet should be(myData.toSet)
+      successWriteAcks.size should be(myData.size)
+      failureWriteAcks should be(Nil)
+      (successWriteAcks.size + failureWriteAcks.size) should be(myData.size)
+
+      // eventually all nodes will have the data
+      within(15.seconds) {
+        awaitAssert {
+          val readProbe = TestProbe()
+ replicator.tell(Get(key, ReadLocal), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + result.elements should be(expectedData) + } + } + + } + + enterBarrier("after-test-1") + } + + "write/read to majority when all nodes connected" in { + val key = ORSetKey[Int]("B") + val readMajority = ReadMajority(timeout) + val writeMajority = WriteMajority(timeout) + runOn(nodes: _*) { + val writeProbe = TestProbe() + val writeAcks = myData.map { i ⇒ + sleepDelay() + replicator.tell(Update(key, ORSet(), writeMajority, Some(i))(_ + i), writeProbe.ref) + writeProbe.receiveOne(timeout + 1.second) + } + val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] ⇒ success } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] ⇒ fail } + successWriteAcks.map(_.request.get).toSet should be(myData.toSet) + successWriteAcks.size should be(myData.size) + failureWriteAcks should be(Nil) + (successWriteAcks.size + failureWriteAcks.size) should be(myData.size) + + enterBarrier("data-written-2") + + // read from majority of nodes, which is enough to retrieve all data + val readProbe = TestProbe() + replicator.tell(Get(key, readMajority), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + val survivors = result.elements.size + result.elements should be(expectedData) + + } + + runOn(controller) { + enterBarrier("data-written-2") + } + + enterBarrier("after-test-2") + } + + "replicate values after partition" in { + val key = ORSetKey[Int]("C") + runOn(controller) { + sleepBeforePartition() + for (a ← List(n1, n4, n5); b ← List(n2, n3)) + testConductor.blackhole(a, b, Direction.Both).await + sleepDuringPartition() + for (a ← List(n1, n4, n5); b ← List(n2, n3)) + testConductor.passThrough(a, b, Direction.Both).await + enterBarrier("partition-healed-3") + } + + runOn(nodes: _*) { + val writeProbe = TestProbe() + val writeAcks = myData.map { i ⇒ + 
sleepDelay() + replicator.tell(Update(key, ORSet(), WriteLocal, Some(i))(_ + i), writeProbe.ref) + writeProbe.receiveOne(3.seconds) + } + val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] ⇒ success } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] ⇒ fail } + successWriteAcks.map(_.request.get).toSet should be(myData.toSet) + successWriteAcks.size should be(myData.size) + failureWriteAcks should be(Nil) + (successWriteAcks.size + failureWriteAcks.size) should be(myData.size) + + enterBarrier("partition-healed-3") + + // eventually all nodes will have the data + within(15.seconds) { + awaitAssert { + val readProbe = TestProbe() + replicator.tell(Get(key, ReadLocal), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + result.elements should be(expectedData) + } + } + + } + + enterBarrier("after-test-3") + } + + "write to majority during 3+2 partition and read from majority after partition" in { + val key = ORSetKey[Int]("D") + val readMajority = ReadMajority(timeout) + val writeMajority = WriteMajority(timeout) + runOn(controller) { + sleepBeforePartition() + for (a ← List(n1, n4, n5); b ← List(n2, n3)) + testConductor.blackhole(a, b, Direction.Both).await + sleepDuringPartition() + for (a ← List(n1, n4, n5); b ← List(n2, n3)) + testConductor.passThrough(a, b, Direction.Both).await + enterBarrier("partition-healed-4") + } + + runOn(nodes: _*) { + val writeProbe = TestProbe() + val writeAcks = myData.map { i ⇒ + sleepDelay() + replicator.tell(Update(key, ORSet(), writeMajority, Some(i))(_ + i), writeProbe.ref) + writeProbe.receiveOne(timeout + 1.second) + } + val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] ⇒ success } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] ⇒ fail } + runOn(n1, n4, n5) { + successWriteAcks.map(_.request.get).toSet should be(myData.toSet) + successWriteAcks.size should be(myData.size) + 
failureWriteAcks should be(Nil) + } + runOn(n2, n3) { + // without delays all could theoretically have been written before the blackhole + if (delayMillis != 0) + failureWriteAcks should not be (Nil) + } + (successWriteAcks.size + failureWriteAcks.size) should be(myData.size) + + enterBarrier("partition-healed-4") + + // on the 2 node side, read from majority of nodes is enough to read all writes + runOn(n2, n3) { + val readProbe = TestProbe() + replicator.tell(Get(key, readMajority), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + val survivors = result.elements.size + result.elements should be(expectedData) + } + // but on the 3 node side, read from majority doesn't mean that we are guaranteed to see + // the writes from the other side, yet + + // eventually all nodes will have the data + within(15.seconds) { + awaitAssert { + val readProbe = TestProbe() + replicator.tell(Get(key, ReadLocal), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + result.elements should be(expectedData) + } + } + } + + enterBarrier("after-test-4") + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala new file mode 100644 index 0000000000..15794fac68 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala @@ -0,0 +1,233 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc.
+ */ +package akka.cluster.ddata + +import scala.concurrent.Await +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Deploy +import akka.actor.Props +import akka.cluster.Cluster +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +object PerformanceSpec extends MultiNodeConfig { + val n1 = role("n1") + val n2 = role("n2") + val n3 = role("n3") + val n4 = role("n4") + val n5 = role("n5") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = ERROR + akka.stdout-loglevel = ERROR + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters = off + akka.log-dead-letters-during-shutdown = off + akka.remote.log-remote-lifecycle-events = ERROR + akka.remote.log-frame-size-exceeding=1000b + akka.testconductor.barrier-timeout = 60 s + akka.cluster.distributed-data.gossip-interval = 1 s + akka.actor.serialize-messages = off + """)) + + def countDownProps(latch: TestLatch): Props = Props(new CountDown(latch)).withDeploy(Deploy.local) + + class CountDown(latch: TestLatch) extends Actor { + def receive = { + case _ ⇒ + latch.countDown() + if (latch.isOpen) + context.stop(self) + } + } + +} + +class PerformanceSpecMultiJvmNode1 extends PerformanceSpec +class PerformanceSpecMultiJvmNode2 extends PerformanceSpec +class PerformanceSpecMultiJvmNode3 extends PerformanceSpec +class PerformanceSpecMultiJvmNode4 extends PerformanceSpec +class PerformanceSpecMultiJvmNode5 extends PerformanceSpec + +class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpec with ImplicitSender { + import PerformanceSpec._ + import Replicator._ + + override def initialParticipants = roles.size + + implicit val cluster = Cluster(system) + val replicator = DistributedData(system).replicator + val timeout = 3.seconds.dilated + val factor = 1 // use 3 
here for serious tuning + val repeatCount = 3 // use at least 10 here for serious tuning + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + def repeat(description: String, keys: Iterable[ORSetKey[Int]], n: Int, expectedAfterReplication: Option[Set[Int]] = None)( + block: (ORSetKey[Int], Int, ActorRef) ⇒ Unit, afterEachKey: ORSetKey[Int] ⇒ Unit = _ ⇒ ()): Unit = { + + keys.foreach { key ⇒ + val startTime = System.nanoTime() + runOn(n1) { + val latch = TestLatch(n) + val replyTo = system.actorOf(countDownProps(latch)) + + var i = 0 + while (i < n) { + block(key, i, replyTo) + i += 1 + } + Await.ready(latch, 5.seconds + (1.second * factor)) + } + expectedAfterReplication.foreach { expected ⇒ + enterBarrier("repeat-" + key + "-before-awaitReplicated") + awaitReplicated(key, expected) + enterBarrier("repeat-" + key + "-after-awaitReplicated") + } + runOn(n1) { + val endTime = System.nanoTime() + val durationMs = (endTime - startTime).nanos.toMillis + val tps = (n * 1000.0 / durationMs).toInt + println(s"## $n $description took $durationMs ms, $tps TPS") + } + + afterEachKey(key) + enterBarrier("repeat-" + key + "-done") + } + } + + def awaitReplicated(keys: Iterable[ORSetKey[Int]], expectedData: Set[Int]): Unit = + keys.foreach { key ⇒ awaitReplicated(key, expectedData) } + + def awaitReplicated(key: ORSetKey[Int], expectedData: Set[Int]): Unit = { + within(20.seconds) { + awaitAssert { + val readProbe = TestProbe() + replicator.tell(Get(key, ReadLocal), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + result.elements should be(expectedData) + } + } + } + + "Performance" must { + + "setup cluster" in { + roles.foreach { join(_, n1) } + + within(10.seconds) { + awaitAssert { + replicator ! 
GetReplicaCount + expectMsg(ReplicaCount(roles.size)) + } + } + + enterBarrier("after-setup") + } + + "be great for ORSet Update WriteLocal" in { + val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("A" + n)) + val n = 1000 * factor + val expectedData = (0 until n).toSet + repeat("ORSet Update WriteLocal", keys, n)({ (key, i, replyTo) ⇒ + replicator.tell(Update(key, ORSet(), WriteLocal)(_ + i), replyTo) + }, key ⇒ awaitReplicated(key, expectedData)) + + enterBarrier("after-1") + } + + "be blazingly fast for ORSet Get ReadLocal" in { + val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("A" + n)) + repeat("Get ReadLocal", keys, 100000 * factor) { (key, i, replyTo) ⇒ + replicator.tell(Get(key, ReadLocal), replyTo) + } + enterBarrier("after-2") + } + + "be good for ORSet Update WriteLocal and gossip replication" in { + val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("B" + n)) + val n = 200 * factor + val expected = Some((0 until n).toSet) + repeat("ORSet Update WriteLocal + gossip", keys, n, expected) { (key, i, replyTo) ⇒ + replicator.tell(Update(key, ORSet(), WriteLocal)(_ + i), replyTo) + } + enterBarrier("after-3") + } + + "be good for ORSet Update WriteLocal and gossip of existing keys" in { + val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("B" + n)) + val n = 200 * factor + val expected = Some((0 until n).toSet ++ (0 until n).map(-_).toSet) + repeat("ORSet Update WriteLocal existing + gossip", keys, n, expected) { (key, i, replyTo) ⇒ + replicator.tell(Update(key, ORSet(), WriteLocal)(_ + (-i)), replyTo) + } + enterBarrier("after-4") + } + + "be good for ORSet Update WriteTwo and gossip replication" in { + val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("C" + n)) + val n = 200 * factor + val expected = Some((0 until n).toSet) + val writeTwo = WriteTo(2, timeout) + repeat("ORSet Update WriteTwo + gossip", keys, n, expected) { (key, i, replyTo) ⇒ + replicator.tell(Update(key, ORSet(), writeTwo)(_ + i), replyTo) + } + enterBarrier("after-5") + } + + "be 
awesome for GCounter Update WriteLocal" in { + val startTime = System.nanoTime() + val n = 1000 * factor + val key = GCounterKey("D") + runOn(n1, n2, n3) { + val latch = TestLatch(n) + val replyTo = system.actorOf(countDownProps(latch)) + for (_ ← 0 until n) + replicator.tell(Update(key, GCounter(), WriteLocal)(_ + 1), replyTo) + Await.ready(latch, 5.seconds + (1.second * factor)) + enterBarrier("update-done-6") + runOn(n1) { + val endTime = System.nanoTime() + val durationMs = (endTime - startTime).nanos.toMillis + val tps = (3 * n * 1000.0 / durationMs).toInt + println(s"## ${3 * n} GCounter Update took $durationMs ms, $tps TPS") + } + } + runOn(n4, n5) { + enterBarrier("update-done-6") + } + + within(20.seconds) { + awaitAssert { + val readProbe = TestProbe() + replicator.tell(Get(key, ReadLocal), readProbe.ref) + val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) } + result.value should be(3 * n) + } + } + enterBarrier("replication-done-6") + runOn(n1) { + val endTime = System.nanoTime() + val durationMs = (endTime - startTime).nanos.toMillis + val tps = (n * 1000.0 / durationMs).toInt + println(s"## $n GCounter Update + gossip took $durationMs ms, $tps TPS") + } + + enterBarrier("after-6") + } + + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala new file mode 100644 index 0000000000..becfcf2dfc --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala @@ -0,0 +1,234 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata + +import scala.concurrent.duration._ + +import akka.cluster.Cluster +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.remote.transport.ThrottlerTransportAdapter.Direction +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +object ReplicatorChaosSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.cluster.roles = ["backend"] + akka.log-dead-letters-during-shutdown = off + """)) + + testTransport(on = true) +} + +class ReplicatorChaosSpecMultiJvmNode1 extends ReplicatorChaosSpec +class ReplicatorChaosSpecMultiJvmNode2 extends ReplicatorChaosSpec +class ReplicatorChaosSpecMultiJvmNode3 extends ReplicatorChaosSpec +class ReplicatorChaosSpecMultiJvmNode4 extends ReplicatorChaosSpec +class ReplicatorChaosSpecMultiJvmNode5 extends ReplicatorChaosSpec + +class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatorChaosSpec._ + import Replicator._ + + override def initialParticipants = roles.size + + implicit val cluster = Cluster(system) + val replicator = system.actorOf(Replicator.props( + ReplicatorSettings(system).withRole("backend").withGossipInterval(1.second)), "replicator") + val timeout = 3.seconds.dilated + + val KeyA = GCounterKey("A") + val KeyB = PNCounterKey("B") + val KeyC = GCounterKey("C") + val KeyD = GCounterKey("D") + val KeyE = GSetKey[String]("E") + val KeyF = ORSetKey[String]("F") + val KeyX = GCounterKey("X") + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + def assertValue(key: 
Key[ReplicatedData], expected: Any): Unit = + within(10.seconds) { + awaitAssert { + replicator ! Get(key, ReadLocal) + val value = expectMsgPF() { + case g @ GetSuccess(`key`, _) ⇒ g.dataValue match { + case c: GCounter ⇒ c.value + case c: PNCounter ⇒ c.value + case c: GSet[_] ⇒ c.elements + case c: ORSet[_] ⇒ c.elements + } + } + value should be(expected) + } + } + + def assertDeleted(key: Key[ReplicatedData]): Unit = + within(5.seconds) { + awaitAssert { + replicator ! Get(key, ReadLocal) + expectMsg(DataDeleted(key)) + } + } + + "Replicator in chaotic cluster" must { + + "replicate data in initial phase" in { + join(first, first) + join(second, first) + join(third, first) + join(fourth, first) + join(fifth, first) + + within(10.seconds) { + awaitAssert { + replicator ! GetReplicaCount + expectMsg(ReplicaCount(5)) + } + } + + runOn(first) { + (0 until 5).foreach { i ⇒ + replicator ! Update(KeyA, GCounter(), WriteLocal)(_ + 1) + replicator ! Update(KeyB, PNCounter(), WriteLocal)(_ - 1) + replicator ! Update(KeyC, GCounter(), WriteAll(timeout))(_ + 1) + } + receiveN(15).map(_.getClass).toSet should be(Set(classOf[UpdateSuccess[_]])) + } + + runOn(second) { + replicator ! Update(KeyA, GCounter(), WriteLocal)(_ + 20) + replicator ! Update(KeyB, PNCounter(), WriteTo(2, timeout))(_ + 20) + replicator ! Update(KeyC, GCounter(), WriteAll(timeout))(_ + 20) + receiveN(3).toSet should be(Set(UpdateSuccess(KeyA, None), + UpdateSuccess(KeyB, None), UpdateSuccess(KeyC, None))) + + replicator ! Update(KeyE, GSet(), WriteLocal)(_ + "e1" + "e2") + expectMsg(UpdateSuccess(KeyE, None)) + + replicator ! Update(KeyF, ORSet(), WriteLocal)(_ + "e1" + "e2") + expectMsg(UpdateSuccess(KeyF, None)) + } + + runOn(fourth) { + replicator ! Update(KeyD, GCounter(), WriteLocal)(_ + 40) + expectMsg(UpdateSuccess(KeyD, None)) + + replicator ! Update(KeyE, GSet(), WriteLocal)(_ + "e2" + "e3") + expectMsg(UpdateSuccess(KeyE, None)) + + replicator ! 
Update(KeyF, ORSet(), WriteLocal)(_ + "e2" + "e3") + expectMsg(UpdateSuccess(KeyF, None)) + } + + runOn(fifth) { + replicator ! Update(KeyX, GCounter(), WriteTo(2, timeout))(_ + 50) + expectMsg(UpdateSuccess(KeyX, None)) + replicator ! Delete(KeyX, WriteLocal) + expectMsg(DeleteSuccess(KeyX)) + } + + enterBarrier("initial-updates-done") + + assertValue(KeyA, 25) + assertValue(KeyB, 15) + assertValue(KeyC, 25) + assertValue(KeyD, 40) + assertValue(KeyE, Set("e1", "e2", "e3")) + assertValue(KeyF, Set("e1", "e2", "e3")) + assertDeleted(KeyX) + + enterBarrier("after-1") + } + + "be available during network split" in { + val side1 = Seq(first, second) + val side2 = Seq(third, fourth, fifth) + runOn(first) { + for (a ← side1; b ← side2) + testConductor.blackhole(a, b, Direction.Both).await + } + enterBarrier("split") + + runOn(first) { + replicator ! Update(KeyA, GCounter(), WriteTo(2, timeout))(_ + 1) + expectMsg(UpdateSuccess(KeyA, None)) + } + + runOn(third) { + replicator ! Update(KeyA, GCounter(), WriteTo(2, timeout))(_ + 2) + expectMsg(UpdateSuccess(KeyA, None)) + + replicator ! Update(KeyE, GSet(), WriteTo(2, timeout))(_ + "e4") + expectMsg(UpdateSuccess(KeyE, None)) + + replicator ! Update(KeyF, ORSet(), WriteTo(2, timeout))(_ - "e2") + expectMsg(UpdateSuccess(KeyF, None)) + } + runOn(fourth) { + replicator ! 
Update(KeyD, GCounter(), WriteTo(2, timeout))(_ + 1) + expectMsg(UpdateSuccess(KeyD, None)) + } + enterBarrier("update-during-split") + + runOn(side1: _*) { + assertValue(KeyA, 26) + assertValue(KeyB, 15) + assertValue(KeyD, 40) + assertValue(KeyE, Set("e1", "e2", "e3")) + assertValue(KeyF, Set("e1", "e2", "e3")) + } + runOn(side2: _*) { + assertValue(KeyA, 27) + assertValue(KeyB, 15) + assertValue(KeyD, 41) + assertValue(KeyE, Set("e1", "e2", "e3", "e4")) + assertValue(KeyF, Set("e1", "e3")) + } + enterBarrier("update-during-split-verified") + + runOn(first) { + testConductor.exit(fourth, 0).await + } + + enterBarrier("after-2") + } + + "converge after partition" in { + val side1 = Seq(first, second) + val side2 = Seq(third, fifth) // fourth was shutdown + runOn(first) { + for (a ← side1; b ← side2) + testConductor.passThrough(a, b, Direction.Both).await + } + enterBarrier("split-repaired") + + assertValue(KeyA, 28) + assertValue(KeyB, 15) + assertValue(KeyC, 25) + assertValue(KeyD, 41) + assertValue(KeyE, Set("e1", "e2", "e3", "e4")) + assertValue(KeyF, Set("e1", "e3")) + assertDeleted(KeyX) + + enterBarrier("after-3") + } + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala new file mode 100644 index 0000000000..b91c1b8795 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala @@ -0,0 +1,197 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata + +import scala.concurrent.duration._ + +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.InitialStateAsEvents +import akka.cluster.ClusterEvent.MemberUp +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +object ReplicatorPruningSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + +} + +class ReplicatorPruningSpecMultiJvmNode1 extends ReplicatorPruningSpec +class ReplicatorPruningSpecMultiJvmNode2 extends ReplicatorPruningSpec +class ReplicatorPruningSpecMultiJvmNode3 extends ReplicatorPruningSpec + +class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatorPruningSpec._ + import Replicator._ + + override def initialParticipants = roles.size + + implicit val cluster = Cluster(system) + val maxPruningDissemination = 3.seconds + val replicator = system.actorOf(Replicator.props( + ReplicatorSettings(system).withGossipInterval(1.second) + .withPruning(pruningInterval = 1.second, maxPruningDissemination)), "replicator") + val timeout = 2.seconds.dilated + + val KeyA = GCounterKey("A") + val KeyB = ORSetKey[String]("B") + val KeyC = PNCounterMapKey("C") + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Pruning of CRDT" must { + + "move data from removed node" in { + join(first, first) + join(second, first) + join(third, first) + + within(5.seconds) { + awaitAssert { + replicator ! 
GetReplicaCount + expectMsg(ReplicaCount(3)) + } + } + + // we need the UniqueAddress + val memberProbe = TestProbe() + cluster.subscribe(memberProbe.ref, initialStateMode = InitialStateAsEvents, classOf[MemberUp]) + val thirdUniqueAddress = { + val member = memberProbe.fishForMessage(3.seconds) { + case MemberUp(m) if m.address == node(third).address ⇒ true + case _ ⇒ false + }.asInstanceOf[MemberUp].member + member.uniqueAddress + } + + replicator ! Update(KeyA, GCounter(), WriteAll(timeout))(_ + 3) + expectMsg(UpdateSuccess(KeyA, None)) + + replicator ! Update(KeyB, ORSet(), WriteAll(timeout))(_ + "a" + "b" + "c") + expectMsg(UpdateSuccess(KeyB, None)) + + replicator ! Update(KeyC, PNCounterMap(), WriteAll(timeout))(_ increment "x" increment "y") + expectMsg(UpdateSuccess(KeyC, None)) + + enterBarrier("updates-done") + + replicator ! Get(KeyA, ReadLocal) + val oldCounter = expectMsgType[GetSuccess[GCounter]].dataValue + oldCounter.value should be(9) + + replicator ! Get(KeyB, ReadLocal) + val oldSet = expectMsgType[GetSuccess[ORSet[String]]].dataValue + oldSet.elements should be(Set("a", "b", "c")) + + replicator ! Get(KeyC, ReadLocal) + val oldMap = expectMsgType[GetSuccess[PNCounterMap]].dataValue + oldMap.get("x") should be(Some(3)) + oldMap.get("y") should be(Some(3)) + + enterBarrier("get-old") + + runOn(first) { + cluster.leave(node(third).address) + } + + runOn(first, second) { + within(15.seconds) { + awaitAssert { + replicator ! GetReplicaCount + expectMsg(ReplicaCount(2)) + } + } + } + enterBarrier("third-removed") + + runOn(first, second) { + within(15.seconds) { + awaitAssert { + replicator ! Get(KeyA, ReadLocal) + expectMsgPF() { + case g @ GetSuccess(KeyA, _) ⇒ + g.get(KeyA).value should be(9) + g.get(KeyA).needPruningFrom(thirdUniqueAddress) should be(false) + } + } + } + within(5.seconds) { + awaitAssert { + replicator ! 
Get(KeyB, ReadLocal) + expectMsgPF() { + case g @ GetSuccess(KeyB, _) ⇒ + g.get(KeyB).elements should be(Set("a", "b", "c")) + g.get(KeyB).needPruningFrom(thirdUniqueAddress) should be(false) + } + } + } + within(5.seconds) { + awaitAssert { + replicator ! Get(KeyC, ReadLocal) + expectMsgPF() { + case g @ GetSuccess(KeyC, _) ⇒ + g.get(KeyC).entries should be(Map("x" -> 3L, "y" -> 3L)) + g.get(KeyC).needPruningFrom(thirdUniqueAddress) should be(false) + } + } + } + } + enterBarrier("pruning-done") + + // on one of the nodes the data has been updated by the pruning, + // client can update anyway + def updateAfterPruning(expectedValue: Int): Unit = { + replicator ! Update(KeyA, GCounter(), WriteAll(timeout), None)(_ + 1) + expectMsgPF() { + case UpdateSuccess(KeyA, _) ⇒ + replicator ! Get(KeyA, ReadLocal) + val retrieved = expectMsgType[GetSuccess[GCounter]].dataValue + retrieved.value should be(expectedValue) + } + } + runOn(first) { + updateAfterPruning(expectedValue = 10) + } + enterBarrier("update-first-after-pruning") + + runOn(second) { + updateAfterPruning(expectedValue = 11) + } + enterBarrier("update-second-after-pruning") + + // after pruning performed and maxDissemination it is tombstoned + // and we should still not be able to update with data from removed node + expectNoMsg(maxPruningDissemination + 3.seconds) + + runOn(first) { + updateAfterPruning(expectedValue = 12) + } + enterBarrier("update-first-after-tombstone") + + runOn(second) { + updateAfterPruning(expectedValue = 13) + } + enterBarrier("update-second-after-tombstone") + + enterBarrier("after-1") + } + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala new file mode 100644 index 0000000000..0c220218a3 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala @@ -0,0 +1,503 @@ +/** + * Copyright (C) 2009-2015 Typesafe 
Inc. + */ +package akka.cluster.ddata + +import scala.concurrent.duration._ + +import akka.cluster.Cluster +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.remote.transport.ThrottlerTransportAdapter.Direction +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +object ReplicatorSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + + testTransport(on = true) + +} + +class ReplicatorSpecMultiJvmNode1 extends ReplicatorSpec +class ReplicatorSpecMultiJvmNode2 extends ReplicatorSpec +class ReplicatorSpecMultiJvmNode3 extends ReplicatorSpec + +class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatorSpec._ + import Replicator._ + + override def initialParticipants = roles.size + + implicit val cluster = Cluster(system) + val replicator = system.actorOf(Replicator.props( + ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), "replicator") + val timeout = 2.seconds.dilated + val writeTwo = WriteTo(2, timeout) + val writeMajority = WriteMajority(timeout) + val writeAll = WriteAll(timeout) + val readTwo = ReadFrom(2, timeout) + val readAll = ReadAll(timeout) + val readMajority = ReadMajority(timeout) + + val KeyA = GCounterKey("A") + val KeyB = GCounterKey("B") + val KeyC = GCounterKey("C") + val KeyD = GCounterKey("D") + val KeyE = GCounterKey("E") + val KeyE2 = GCounterKey("E2") + val KeyF = GCounterKey("F") + val KeyG = ORSetKey[String]("G") + val KeyX = GCounterKey("X") + val KeyY = GCounterKey("Y") + val KeyZ = GCounterKey("Z") + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join 
node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Cluster CRDT" must { + + "work in single node cluster" in { + join(first, first) + + runOn(first) { + + within(5.seconds) { + awaitAssert { + replicator ! GetReplicaCount + expectMsg(ReplicaCount(1)) + } + } + + val changedProbe = TestProbe() + replicator ! Subscribe(KeyA, changedProbe.ref) + replicator ! Subscribe(KeyX, changedProbe.ref) + + replicator ! Get(KeyA, ReadLocal) + expectMsg(NotFound(KeyA, None)) + + val c3 = GCounter() + 3 + replicator ! Update(KeyA, GCounter(), WriteLocal)(_ + 3) + expectMsg(UpdateSuccess(KeyA, None)) + replicator ! Get(KeyA, ReadLocal) + expectMsg(GetSuccess(KeyA, None)(c3)).dataValue should be(c3) + changedProbe.expectMsg(Changed(KeyA)(c3)).dataValue should be(c3) + + val changedProbe2 = TestProbe() + replicator ! Subscribe(KeyA, changedProbe2.ref) + changedProbe2.expectMsg(Changed(KeyA)(c3)).dataValue should be(c3) + + val c4 = c3 + 1 + // too strong consistency level + replicator ! Update(KeyA, GCounter(), writeTwo)(_ + 1) + expectMsg(UpdateTimeout(KeyA, None)) + replicator ! Get(KeyA, ReadLocal) + expectMsg(GetSuccess(KeyA, None)(c4)).dataValue should be(c4) + changedProbe.expectMsg(Changed(KeyA)(c4)).dataValue should be(c4) + + val c5 = c4 + 1 + // too strong consistency level + replicator ! Update(KeyA, GCounter(), writeMajority)(_ + 1) + expectMsg(UpdateSuccess(KeyA, None)) + replicator ! Get(KeyA, readMajority) + expectMsg(GetSuccess(KeyA, None)(c5)).dataValue should be(c5) + changedProbe.expectMsg(Changed(KeyA)(c5)).dataValue should be(c5) + + val c6 = c5 + 1 + replicator ! Update(KeyA, GCounter(), writeAll)(_ + 1) + expectMsg(UpdateSuccess(KeyA, None)) + replicator ! Get(KeyA, readAll) + expectMsg(GetSuccess(KeyA, None)(c6)).dataValue should be(c6) + changedProbe.expectMsg(Changed(KeyA)(c6)).dataValue should be(c6) + + val c9 = GCounter() + 9 + replicator ! 
Update(KeyX, GCounter(), WriteLocal)(_ + 9) + expectMsg(UpdateSuccess(KeyX, None)) + changedProbe.expectMsg(Changed(KeyX)(c9)).dataValue should be(c9) + replicator ! Delete(KeyX, WriteLocal) + expectMsg(DeleteSuccess(KeyX)) + changedProbe.expectMsg(DataDeleted(KeyX)) + replicator ! Get(KeyX, ReadLocal) + expectMsg(DataDeleted(KeyX)) + replicator ! Get(KeyX, readAll) + expectMsg(DataDeleted(KeyX)) + replicator ! Update(KeyX, GCounter(), WriteLocal)(_ + 1) + expectMsg(DataDeleted(KeyX)) + replicator ! Delete(KeyX, WriteLocal) + expectMsg(DataDeleted(KeyX)) + + replicator ! GetKeyIds + expectMsg(GetKeyIdsResult(Set("A"))) + } + + enterBarrier("after-1") + } + } + + "reply with ModifyFailure if exception is thrown by modify function" in { + val e = new RuntimeException("errr") + replicator ! Update(KeyA, GCounter(), WriteLocal)(_ ⇒ throw e) + expectMsgType[ModifyFailure[_]].cause should be(e) + } + + "replicate values to new node" in { + join(second, first) + + runOn(first, second) { + within(10.seconds) { + awaitAssert { + replicator ! GetReplicaCount + expectMsg(ReplicaCount(2)) + } + } + } + + enterBarrier("2-nodes") + + runOn(second) { + val changedProbe = TestProbe() + replicator ! Subscribe(KeyA, changedProbe.ref) + // "A" should be replicated via gossip to the new node + within(5.seconds) { + awaitAssert { + replicator ! Get(KeyA, ReadLocal) + val c = expectMsgPF() { case g @ GetSuccess(KeyA, _) ⇒ g.get(KeyA) } + c.value should be(6) + } + } + val c = changedProbe.expectMsgPF() { case c @ Changed(KeyA) ⇒ c.get(KeyA) } + c.value should be(6) + } + + enterBarrier("after-2") + } + + "work in 2 node cluster" in { + + runOn(first, second) { + // start with 20 on both nodes + replicator ! Update(KeyB, GCounter(), WriteLocal)(_ + 20) + expectMsg(UpdateSuccess(KeyB, None)) + + // add 1 on both nodes using WriteTwo + replicator ! 
Update(KeyB, GCounter(), writeTwo)(_ + 1) + expectMsg(UpdateSuccess(KeyB, None)) + + // the total, after replication should be 42 + awaitAssert { + replicator ! Get(KeyB, readTwo) + val c = expectMsgPF() { case g @ GetSuccess(KeyB, _) ⇒ g.get(KeyB) } + c.value should be(42) + } + } + enterBarrier("update-42") + + runOn(first, second) { + // add 1 on both nodes using WriteAll + replicator ! Update(KeyB, GCounter(), writeAll)(_ + 1) + expectMsg(UpdateSuccess(KeyB, None)) + + // the total, after replication should be 44 + awaitAssert { + replicator ! Get(KeyB, readAll) + val c = expectMsgPF() { case g @ GetSuccess(KeyB, _) ⇒ g.get(KeyB) } + c.value should be(44) + } + } + enterBarrier("update-44") + + runOn(first, second) { + // add 1 on both nodes using WriteMajority + replicator ! Update(KeyB, GCounter(), writeMajority)(_ + 1) + expectMsg(UpdateSuccess(KeyB, None)) + + // the total, after replication should be 46 + awaitAssert { + replicator ! Get(KeyB, readMajority) + val c = expectMsgPF() { case g @ GetSuccess(KeyB, _) ⇒ g.get(KeyB) } + c.value should be(46) + } + } + + enterBarrier("after-3") + } + + "be replicated after succesful update" in { + val changedProbe = TestProbe() + runOn(first, second) { + replicator ! Subscribe(KeyC, changedProbe.ref) + } + + runOn(first) { + replicator ! Update(KeyC, GCounter(), writeTwo)(_ + 30) + expectMsg(UpdateSuccess(KeyC, None)) + changedProbe.expectMsgPF() { case c @ Changed(KeyC) ⇒ c.get(KeyC).value } should be(30) + + replicator ! Update(KeyY, GCounter(), writeTwo)(_ + 30) + expectMsg(UpdateSuccess(KeyY, None)) + + replicator ! Update(KeyZ, GCounter(), writeMajority)(_ + 30) + expectMsg(UpdateSuccess(KeyZ, None)) + } + enterBarrier("update-c30") + + runOn(second) { + replicator ! 
Get(KeyC, ReadLocal) + val c30 = expectMsgPF() { case g @ GetSuccess(KeyC, _) ⇒ g.get(KeyC) } + c30.value should be(30) + changedProbe.expectMsgPF() { case c @ Changed(KeyC) ⇒ c.get(KeyC).value } should be(30) + + // replicate with gossip after WriteLocal + replicator ! Update(KeyC, GCounter(), WriteLocal)(_ + 1) + expectMsg(UpdateSuccess(KeyC, None)) + changedProbe.expectMsgPF() { case c @ Changed(KeyC) ⇒ c.get(KeyC).value } should be(31) + + replicator ! Delete(KeyY, WriteLocal) + expectMsg(DeleteSuccess(KeyY)) + + replicator ! Get(KeyZ, readMajority) + expectMsgPF() { case g @ GetSuccess(KeyZ, _) ⇒ g.get(KeyZ).value } should be(30) + } + enterBarrier("update-c31") + + runOn(first) { + // KeyC and deleted KeyY should be replicated via gossip to the other node + within(5.seconds) { + awaitAssert { + replicator ! Get(KeyC, ReadLocal) + val c = expectMsgPF() { case g @ GetSuccess(KeyC, _) ⇒ g.get(KeyC) } + c.value should be(31) + + replicator ! Get(KeyY, ReadLocal) + expectMsg(DataDeleted(KeyY)) + } + } + changedProbe.expectMsgPF() { case c @ Changed(KeyC) ⇒ c.get(KeyC).value } should be(31) + } + enterBarrier("verified-c31") + + // and also for concurrent updates + runOn(first, second) { + replicator ! Get(KeyC, ReadLocal) + val c31 = expectMsgPF() { case g @ GetSuccess(KeyC, _) ⇒ g.get(KeyC) } + c31.value should be(31) + + val c32 = c31 + 1 + replicator ! Update(KeyC, GCounter(), WriteLocal)(_ + 1) + expectMsg(UpdateSuccess(KeyC, None)) + + within(5.seconds) { + awaitAssert { + replicator ! Get(KeyC, ReadLocal) + val c = expectMsgPF() { case g @ GetSuccess(KeyC, _) ⇒ g.get(KeyC) } + c.value should be(33) + } + } + } + + enterBarrier("after-4") + } + + "converge after partition" in { + runOn(first) { + replicator ! Update(KeyD, GCounter(), writeTwo)(_ + 40) + expectMsg(UpdateSuccess(KeyD, None)) + + testConductor.blackhole(first, second, Direction.Both).await + } + enterBarrier("blackhole-first-second") + + runOn(first, second) { + replicator ! 
Get(KeyD, ReadLocal) + val c40 = expectMsgPF() { case g @ GetSuccess(KeyD, _) ⇒ g.get(KeyD) } + c40.value should be(40) + replicator ! Update(KeyD, GCounter() + 1, writeTwo)(_ + 1) + expectMsg(UpdateTimeout(KeyD, None)) + replicator ! Update(KeyD, GCounter(), writeTwo)(_ + 1) + expectMsg(UpdateTimeout(KeyD, None)) + } + runOn(first) { + for (n ← 1 to 30) { + val KeyDn = GCounterKey("D" + n) + replicator ! Update(KeyDn, GCounter(), WriteLocal)(_ + n) + expectMsg(UpdateSuccess(KeyDn, None)) + } + } + enterBarrier("updates-during-partion") + + runOn(first) { + testConductor.passThrough(first, second, Direction.Both).await + } + enterBarrier("passThrough-first-second") + + runOn(first, second) { + replicator ! Get(KeyD, readTwo) + val c44 = expectMsgPF() { case g @ GetSuccess(KeyD, _) ⇒ g.get(KeyD) } + c44.value should be(44) + + within(10.seconds) { + awaitAssert { + for (n ← 1 to 30) { + val KeyDn = GCounterKey("D" + n) + replicator ! Get(KeyDn, ReadLocal) + expectMsgPF() { case g @ GetSuccess(KeyDn, _) ⇒ g.get(KeyDn) }.value should be(n) + } + } + } + } + + enterBarrier("after-5") + } + + "support majority quorum write and read with 3 nodes with 1 unreachable" in { + join(third, first) + + runOn(first, second, third) { + within(10.seconds) { + awaitAssert { + replicator ! GetReplicaCount + expectMsg(ReplicaCount(3)) + } + } + } + enterBarrier("3-nodes") + + runOn(first, second, third) { + replicator ! Update(KeyE, GCounter(), writeMajority)(_ + 50) + expectMsg(UpdateSuccess(KeyE, None)) + } + enterBarrier("write-inital-majority") + + runOn(first, second, third) { + replicator ! Get(KeyE, readMajority) + val c150 = expectMsgPF() { case g @ GetSuccess(KeyE, _) ⇒ g.get(KeyE) } + c150.value should be(150) + } + enterBarrier("read-inital-majority") + + runOn(first) { + testConductor.blackhole(first, third, Direction.Both).await + testConductor.blackhole(second, third, Direction.Both).await + } + enterBarrier("blackhole-third") + + runOn(second) { + replicator ! 
Update(KeyE, GCounter(), WriteLocal)(_ + 1) + expectMsg(UpdateSuccess(KeyE, None)) + } + enterBarrier("local-update-from-second") + + runOn(first) { + // ReadMajority should retrieve the previous update from second, before applying the modification + val probe1 = TestProbe() + val probe2 = TestProbe() + replicator.tell(Get(KeyE, readMajority), probe2.ref) + probe2.expectMsgType[GetSuccess[_]] + replicator.tell(Update(KeyE, GCounter(), writeMajority, None) { data ⇒ + probe1.ref ! data.value + data + 1 + }, probe2.ref) + // verify read your own writes, without waiting for the UpdateSuccess reply + // note that the order of the replies are not defined, and therefore we use separate probes + val probe3 = TestProbe() + replicator.tell(Get(KeyE, readMajority), probe3.ref) + probe1.expectMsg(151) + probe2.expectMsg(UpdateSuccess(KeyE, None)) + val c152 = probe3.expectMsgPF() { case g @ GetSuccess(KeyE, _) ⇒ g.get(KeyE) } + c152.value should be(152) + } + enterBarrier("majority-update-from-first") + + runOn(second) { + val probe1 = TestProbe() + replicator.tell(Get(KeyE, readMajority), probe1.ref) + probe1.expectMsgType[GetSuccess[_]] + replicator.tell(Update(KeyE, GCounter(), writeMajority, Some(153))(_ + 1), probe1.ref) + // verify read your own writes, without waiting for the UpdateSuccess reply + // note that the order of the replies are not defined, and therefore we use separate probes + val probe2 = TestProbe() + replicator.tell(Update(KeyE, GCounter(), writeMajority, Some(154))(_ + 1), probe2.ref) + val probe3 = TestProbe() + replicator.tell(Update(KeyE, GCounter(), writeMajority, Some(155))(_ + 1), probe3.ref) + val probe5 = TestProbe() + replicator.tell(Get(KeyE, readMajority), probe5.ref) + probe1.expectMsg(UpdateSuccess(KeyE, Some(153))) + probe2.expectMsg(UpdateSuccess(KeyE, Some(154))) + probe3.expectMsg(UpdateSuccess(KeyE, Some(155))) + val c155 = probe5.expectMsgPF() { case g @ GetSuccess(KeyE, _) ⇒ g.get(KeyE) } + c155.value should be(155) + } + 
enterBarrier("majority-update-from-second") + + runOn(first, second) { + replicator ! Get(KeyE2, readAll, Some(998)) + expectMsg(GetFailure(KeyE2, Some(998))) + replicator ! Get(KeyE2, ReadLocal) + expectMsg(NotFound(KeyE2, None)) + } + enterBarrier("read-all-fail-update") + + runOn(first) { + testConductor.passThrough(first, third, Direction.Both).await + testConductor.passThrough(second, third, Direction.Both).await + } + enterBarrier("passThrough-third") + + runOn(third) { + replicator ! Get(KeyE, readMajority) + val c155 = expectMsgPF() { case g @ GetSuccess(KeyE, _) ⇒ g.get(KeyE) } + c155.value should be(155) + } + + enterBarrier("after-6") + } + + "converge after many concurrent updates" in within(10.seconds) { + runOn(first, second, third) { + var c = GCounter() + for (i ← 0 until 100) { + c += 1 + replicator ! Update(KeyF, GCounter(), writeTwo)(_ + 1) + } + val results = receiveN(100) + results.map(_.getClass).toSet should be(Set(classOf[UpdateSuccess[_]])) + } + enterBarrier("100-updates-done") + runOn(first, second, third) { + replicator ! Get(KeyF, readTwo) + val c = expectMsgPF() { case g @ GetSuccess(KeyF, _) ⇒ g.get(KeyF) } + c.value should be(3 * 100) + } + enterBarrier("after-7") + } + + "read-repair happens before GetSuccess" in { + runOn(first) { + replicator ! Update(KeyG, ORSet(), writeTwo)(_ + "a" + "b") + expectMsgType[UpdateSuccess[_]] + } + enterBarrier("a-b-added-to-G") + runOn(second) { + replicator ! Get(KeyG, readAll) + expectMsgPF() { case g @ GetSuccess(KeyG, _) ⇒ g.get(KeyG).elements } should be(Set("a", "b")) + replicator ! 
Get(KeyG, ReadLocal) + expectMsgPF() { case g @ GetSuccess(KeyG, _) ⇒ g.get(KeyG).elements } should be(Set("a", "b")) + } + enterBarrier("after-8") + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala new file mode 100644 index 0000000000..0f47ca7b78 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala @@ -0,0 +1,20 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata + +import akka.remote.testkit.MultiNodeSpecCallbacks + +import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } +import org.scalatest.Matchers + +/** + * Hooks up MultiNodeSpec with ScalaTest + */ +trait STMultiNodeSpec extends MultiNodeSpecCallbacks + with WordSpecLike with Matchers with BeforeAndAfterAll { + + override def beforeAll() = multiNodeSpecBeforeAll() + + override def afterAll() = multiNodeSpecAfterAll() +} diff --git a/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala new file mode 100644 index 0000000000..e594b48958 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala @@ -0,0 +1,187 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package sample.datareplication + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.LWWMap +import akka.cluster.ddata.Replicator.GetReplicaCount +import akka.cluster.ddata.Replicator.ReplicaCount +import akka.cluster.ddata.STMultiNodeSpec +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.LWWMapKey + +object ReplicatedCacheSpec extends MultiNodeConfig { + val node1 = role("node-1") + val node2 = role("node-2") + val node3 = role("node-3") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + +} + +object ReplicatedCache { + import akka.cluster.ddata.Replicator._ + + def props: Props = Props[ReplicatedCache] + + private final case class Request(key: String, replyTo: ActorRef) + + final case class PutInCache(key: String, value: Any) + final case class GetFromCache(key: String) + final case class Cached(key: String, value: Option[Any]) + final case class Evict(key: String) +} + +class ReplicatedCache() extends Actor { + import akka.cluster.ddata.Replicator._ + import ReplicatedCache._ + + val replicator = DistributedData(context.system).replicator + implicit val cluster = Cluster(context.system) + + def dataKey(entryKey: String): LWWMapKey[Any] = + LWWMapKey("cache-" + math.abs(entryKey.hashCode) % 100) + + def receive = { + case PutInCache(key, value) ⇒ + replicator ! Update(dataKey(key), LWWMap(), WriteLocal)(_ + (key -> value)) + case Evict(key) ⇒ + replicator ! Update(dataKey(key), LWWMap(), WriteLocal)(_ - key) + case GetFromCache(key) ⇒ + replicator ! 
Get(dataKey(key), ReadLocal, Some(Request(key, sender()))) + case g @ GetSuccess(LWWMapKey(_), Some(Request(key, replyTo))) ⇒ + g.dataValue match { + case data: LWWMap[_] ⇒ data.get(key) match { + case Some(value) ⇒ replyTo ! Cached(key, Some(value)) + case None ⇒ replyTo ! Cached(key, None) + } + } + case NotFound(_, Some(Request(key, replyTo))) ⇒ + replyTo ! Cached(key, None) + case _: UpdateResponse[_] ⇒ // ok + } + +} + +class ReplicatedCacheSpecMultiJvmNode1 extends ReplicatedCacheSpec +class ReplicatedCacheSpecMultiJvmNode2 extends ReplicatedCacheSpec +class ReplicatedCacheSpecMultiJvmNode3 extends ReplicatedCacheSpec + +class ReplicatedCacheSpec extends MultiNodeSpec(ReplicatedCacheSpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatedCacheSpec._ + import ReplicatedCache._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + val replicatedCache = system.actorOf(ReplicatedCache.props) + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Demo of a replicated cache" must { + "join cluster" in within(10.seconds) { + join(node1, node1) + join(node2, node1) + join(node3, node1) + + awaitAssert { + DistributedData(system).replicator ! GetReplicaCount + expectMsg(ReplicaCount(roles.size)) + } + enterBarrier("after-1") + } + + "replicate cached entry" in within(10.seconds) { + runOn(node1) { + replicatedCache ! PutInCache("key1", "A") + } + + awaitAssert { + val probe = TestProbe() + replicatedCache.tell(GetFromCache("key1"), probe.ref) + probe.expectMsg(Cached("key1", Some("A"))) + } + + enterBarrier("after-2") + } + + "replicate many cached entries" in within(10.seconds) { + runOn(node1) { + for (i ← 100 to 200) + replicatedCache ! 
PutInCache("key" + i, i) + } + + awaitAssert { + val probe = TestProbe() + for (i ← 100 to 200) { + replicatedCache.tell(GetFromCache("key" + i), probe.ref) + probe.expectMsg(Cached("key" + i, Some(i))) + } + } + + enterBarrier("after-3") + } + + "replicate evicted entry" in within(15.seconds) { + runOn(node1) { + replicatedCache ! PutInCache("key2", "B") + } + + awaitAssert { + val probe = TestProbe() + replicatedCache.tell(GetFromCache("key2"), probe.ref) + probe.expectMsg(Cached("key2", Some("B"))) + } + enterBarrier("key2-replicated") + + runOn(node3) { + replicatedCache ! Evict("key2") + } + + awaitAssert { + val probe = TestProbe() + replicatedCache.tell(GetFromCache("key2"), probe.ref) + probe.expectMsg(Cached("key2", None)) + } + + enterBarrier("after-4") + } + + "replicate updated cached entry" in within(10.seconds) { + runOn(node2) { + replicatedCache ! PutInCache("key1", "A2") + replicatedCache ! PutInCache("key1", "A3") + } + + awaitAssert { + val probe = TestProbe() + replicatedCache.tell(GetFromCache("key1"), probe.ref) + probe.expectMsg(Cached("key1", Some("A3"))) + } + + enterBarrier("after-5") + } + + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala new file mode 100644 index 0000000000..2fddea2f7e --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala @@ -0,0 +1,200 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package sample.datareplication + +import java.lang.management.ManagementFactory +import java.lang.management.MemoryMXBean +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.Address +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberUp, MemberRemoved } +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.LWWMap +import akka.cluster.ddata.Replicator.GetReplicaCount +import akka.cluster.ddata.Replicator.ReplicaCount +import akka.cluster.ddata.STMultiNodeSpec +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.LWWMapKey + +object ReplicatedMetricsSpec extends MultiNodeConfig { + val node1 = role("node-1") + val node2 = role("node-2") + val node3 = role("node-3") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + +} + +object ReplicatedMetrics { + import akka.cluster.ddata.Replicator._ + + def props(measureInterval: FiniteDuration, cleanupInterval: FiniteDuration): Props = + Props(new ReplicatedMetrics(measureInterval, cleanupInterval)) + + def props: Props = props(1.second, 1.minute) + + private case object Tick + private case object Cleanup + + case class UsedHeap(percentPerNode: Map[String, Double]) { + override def toString = + percentPerNode.toSeq.sortBy(_._1).map { + case (key, value) ⇒ key + " --> " + value + " %" + }.mkString("\n") + } + + def nodeKey(address: Address): String = address.host.get + ":" + address.port.get + +} + +class ReplicatedMetrics(measureInterval: FiniteDuration, cleanupInterval: FiniteDuration) + extends Actor with ActorLogging { + import akka.cluster.ddata.Replicator._ + import 
ReplicatedMetrics._ + + val replicator = DistributedData(context.system).replicator + implicit val cluster = Cluster(context.system) + val node = nodeKey(cluster.selfAddress) + + val tickTask = context.system.scheduler.schedule(measureInterval, measureInterval, + self, Tick)(context.dispatcher) + val cleanupTask = context.system.scheduler.schedule(cleanupInterval, cleanupInterval, + self, Cleanup)(context.dispatcher) + val memoryMBean: MemoryMXBean = ManagementFactory.getMemoryMXBean + + val UsedHeapKey = LWWMapKey[Long]("usedHeap") + val MaxHeapKey = LWWMapKey[Long]("maxHeap") + + replicator ! Subscribe(UsedHeapKey, self) + replicator ! Subscribe(MaxHeapKey, self) + + cluster.subscribe(self, InitialStateAsEvents, classOf[MemberUp], classOf[MemberRemoved]) + + override def postStop(): Unit = { + tickTask.cancel() + cluster.unsubscribe(self) + super.postStop() + } + + var maxHeap = Map.empty[String, Long] + var nodesInCluster = Set.empty[String] + + def receive = { + case Tick ⇒ + val heap = memoryMBean.getHeapMemoryUsage + val used = heap.getUsed + val max = heap.getMax + replicator ! Update(UsedHeapKey, LWWMap.empty[Long], WriteLocal)(_ + (node -> used)) + replicator ! 
Update(MaxHeapKey, LWWMap.empty[Long], WriteLocal) { data ⇒ + data.get(node) match { + case Some(`max`) ⇒ data // unchanged + case _ ⇒ data + (node -> max) + } + } + + case c @ Changed(MaxHeapKey) ⇒ + maxHeap = c.get(MaxHeapKey).entries + + case c @ Changed(UsedHeapKey) ⇒ + val usedHeapPercent = UsedHeap(c.get(UsedHeapKey).entries.collect { + case (key, value) if maxHeap.contains(key) ⇒ + (key -> (value.toDouble / maxHeap(key)) * 100.0) + }) + log.debug("Node {} observed:\n{}", node, usedHeapPercent) + context.system.eventStream.publish(usedHeapPercent) + + case _: UpdateResponse[_] ⇒ // ok + + case MemberUp(m) ⇒ + nodesInCluster += nodeKey(m.address) + + case MemberRemoved(m, _) ⇒ + nodesInCluster -= nodeKey(m.address) + + case Cleanup ⇒ + def cleanupRemoved(data: LWWMap[Long]): LWWMap[Long] = + (data.entries.keySet -- nodesInCluster).foldLeft(data) { case (d, key) ⇒ d - key } + + replicator ! Update(UsedHeapKey, LWWMap.empty[Long], WriteLocal)(cleanupRemoved) + replicator ! Update(MaxHeapKey, LWWMap.empty[Long], WriteLocal)(cleanupRemoved) + } + +} + +class ReplicatedMetricsSpecMultiJvmNode1 extends ReplicatedMetricsSpec +class ReplicatedMetricsSpecMultiJvmNode2 extends ReplicatedMetricsSpec +class ReplicatedMetricsSpecMultiJvmNode3 extends ReplicatedMetricsSpec + +class ReplicatedMetricsSpec extends MultiNodeSpec(ReplicatedMetricsSpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatedMetricsSpec._ + import ReplicatedMetrics._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + val replicatedMetrics = system.actorOf(ReplicatedMetrics.props(1.second, 3.seconds)) + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Demo of a replicated metrics" must { + "join cluster" in within(10.seconds) { + join(node1, node1) + join(node2, node1) + join(node3, node1) + + awaitAssert { + DistributedData(system).replicator ! 
GetReplicaCount + expectMsg(ReplicaCount(roles.size)) + } + enterBarrier("after-1") + } + + "replicate metrics" in within(10.seconds) { + val probe = TestProbe() + system.eventStream.subscribe(probe.ref, classOf[UsedHeap]) + awaitAssert { + probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) + } + probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) + probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) + enterBarrier("after-2") + } + + "cleanup removed node" in within(15.seconds) { + val node3Address = node(node3).address + runOn(node1) { + cluster.leave(node3Address) + } + runOn(node1, node2) { + val probe = TestProbe() + system.eventStream.subscribe(probe.ref, classOf[UsedHeap]) + awaitAssert { + probe.expectMsgType[UsedHeap].percentPerNode.size should be(2) + } + probe.expectMsgType[UsedHeap].percentPerNode should not contain ( + nodeKey(node3Address)) + } + enterBarrier("after-3") + } + + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedServiceRegistrySpec.scala b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedServiceRegistrySpec.scala new file mode 100644 index 0000000000..ed5a9344ad --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedServiceRegistrySpec.scala @@ -0,0 +1,267 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package sample.datareplication + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.PoisonPill +import akka.actor.Props +import akka.actor.Terminated +import akka.cluster.Cluster +import akka.cluster.ClusterEvent +import akka.cluster.ClusterEvent.LeaderChanged +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.GSet +import akka.cluster.ddata.ORSet +import akka.cluster.ddata.Replicator.GetReplicaCount +import akka.cluster.ddata.Replicator.ReplicaCount +import akka.cluster.ddata.STMultiNodeSpec +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.GSetKey +import akka.cluster.ddata.ORSetKey +import akka.cluster.ddata.Key + +object ReplicatedServiceRegistrySpec extends MultiNodeConfig { + val node1 = role("node-1") + val node2 = role("node-2") + val node3 = role("node-3") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + + class Service extends Actor { + def receive = { + case s: String ⇒ sender() ! self.path.name + ": " + s + } + } + +} + +object ReplicatedServiceRegistry { + import akka.cluster.ddata.Replicator._ + + val props: Props = Props[ReplicatedServiceRegistry] + + /** + * Register a `service` with a `name`. Several services + * can be registered with the same `name`. + * It will be removed when it is terminated. + */ + final case class Register(name: String, service: ActorRef) + /** + * Lookup services registered for a `name`. [[Bindings]] will + * be sent to `sender()`. 
+ */ + final case class Lookup(name: String) + /** + * Reply for [[Lookup]] + */ + final case class Bindings(name: String, services: Set[ActorRef]) + /** + * Published to `System.eventStream` when services are changed. + */ + final case class BindingChanged(name: String, services: Set[ActorRef]) + + final case class ServiceKey(serviceName: String) extends Key[ORSet[ActorRef]](serviceName) + + private val AllServicesKey = GSetKey[ServiceKey]("service-keys") + +} + +class ReplicatedServiceRegistry() extends Actor with ActorLogging { + import akka.cluster.ddata.Replicator._ + import ReplicatedServiceRegistry._ + + val replicator = DistributedData(context.system).replicator + implicit val cluster = Cluster(context.system) + + var keys = Set.empty[ServiceKey] + var services = Map.empty[String, Set[ActorRef]] + var leader = false + + def serviceKey(serviceName: String): ServiceKey = + ServiceKey("service:" + serviceName) + + override def preStart(): Unit = { + replicator ! Subscribe(AllServicesKey, self) + cluster.subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[ClusterEvent.LeaderChanged]) + } + + override def postStop(): Unit = { + cluster.unsubscribe(self) + } + + def receive = { + case Register(name, service) ⇒ + val dKey = serviceKey(name) + // store the service names in a separate GSet to be able to + // get notifications of new names + if (!keys(dKey)) + replicator ! Update(AllServicesKey, GSet(), WriteLocal)(_ + dKey) + // add the service + replicator ! Update(dKey, ORSet(), WriteLocal)(_ + service) + + case Lookup(key) ⇒ + sender() ! Bindings(key, services.getOrElse(key, Set.empty)) + + case c @ Changed(AllServicesKey) ⇒ + val newKeys = c.get(AllServicesKey).elements + log.debug("Services changed, added: {}, all: {}", (newKeys -- keys), newKeys) + (newKeys -- keys).foreach { dKey ⇒ + // subscribe to get notifications of when services with this name are added or removed + replicator ! 
Subscribe(dKey, self) + } + keys = newKeys + + case c @ Changed(ServiceKey(serviceName)) ⇒ + val name = serviceName.split(":").tail.mkString + val newServices = c.get(serviceKey(name)).elements + log.debug("Services changed for name [{}]: {}", name, newServices) + services = services.updated(name, newServices) + context.system.eventStream.publish(BindingChanged(name, newServices)) + if (leader) + newServices.foreach(context.watch) // watch is idempotent + + case LeaderChanged(node) ⇒ + // Let one node (the leader) be responsible for removal of terminated services + // to avoid redundant work and too many death watch notifications. + // It is not critical to only do it from one node. + val wasLeader = leader + leader = node.exists(_ == cluster.selfAddress) + // when used with many (> 500) services you must increase the system message buffer + // `akka.remote.system-message-buffer-size` + if (!wasLeader && leader) + for (refs ← services.valuesIterator; ref ← refs) + context.watch(ref) + else if (wasLeader && !leader) + for (refs ← services.valuesIterator; ref ← refs) + context.unwatch(ref) + + case Terminated(ref) ⇒ + val names = services.collect { case (name, refs) if refs.contains(ref) ⇒ name } + names.foreach { name ⇒ + log.debug("Service with name [{}] terminated: {}", name, ref) + replicator ! 
Update(serviceKey(name), ORSet(), WriteLocal)(_ - ref) + } + + case _: UpdateResponse[_] ⇒ // ok + } + +} + +class ReplicatedServiceRegistrySpecMultiJvmNode1 extends ReplicatedServiceRegistrySpec +class ReplicatedServiceRegistrySpecMultiJvmNode2 extends ReplicatedServiceRegistrySpec +class ReplicatedServiceRegistrySpecMultiJvmNode3 extends ReplicatedServiceRegistrySpec + +class ReplicatedServiceRegistrySpec extends MultiNodeSpec(ReplicatedServiceRegistrySpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatedServiceRegistrySpec._ + import ReplicatedServiceRegistry._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + val registry = system.actorOf(ReplicatedServiceRegistry.props) + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Demo of a replicated service registry" must { + "join cluster" in within(10.seconds) { + join(node1, node1) + join(node2, node1) + join(node3, node1) + + awaitAssert { + DistributedData(system).replicator ! GetReplicaCount + expectMsg(ReplicaCount(roles.size)) + } + enterBarrier("after-1") + } + + "replicate service entry" in within(10.seconds) { + runOn(node1) { + val a1 = system.actorOf(Props[Service], name = "a1") + registry ! Register("a", a1) + } + + awaitAssert { + val probe = TestProbe() + registry.tell(Lookup("a"), probe.ref) + probe.expectMsgType[Bindings].services.map(_.path.name) should be(Set("a1")) + } + + enterBarrier("after-2") + } + + "replicate updated service entry, and publish to even bus" in { + val probe = TestProbe() + system.eventStream.subscribe(probe.ref, classOf[BindingChanged]) + + runOn(node2) { + val a2 = system.actorOf(Props[Service], name = "a2") + registry ! 
Register("a", a2) + } + + probe.within(10.seconds) { + probe.expectMsgType[BindingChanged].services.map(_.path.name) should be(Set("a1", "a2")) + registry.tell(Lookup("a"), probe.ref) + probe.expectMsgType[Bindings].services.map(_.path.name) should be(Set("a1", "a2")) + } + + enterBarrier("after-4") + } + + "remove terminated service" in { + val probe = TestProbe() + system.eventStream.subscribe(probe.ref, classOf[BindingChanged]) + + runOn(node2) { + registry.tell(Lookup("a"), probe.ref) + val a2 = probe.expectMsgType[Bindings].services.find(_.path.name == "a2").get + a2 ! PoisonPill + } + + probe.within(10.seconds) { + probe.expectMsgType[BindingChanged].services.map(_.path.name) should be(Set("a1")) + registry.tell(Lookup("a"), probe.ref) + probe.expectMsgType[Bindings].services.map(_.path.name) should be(Set("a1")) + } + + enterBarrier("after-5") + } + + "replicate many service entries" in within(10.seconds) { + for (i ← 100 until 200) { + val service = system.actorOf(Props[Service], name = myself.name + "_" + i) + registry ! Register("a" + i, service) + } + + awaitAssert { + val probe = TestProbe() + for (i ← 100 until 200) { + registry.tell(Lookup("a" + i), probe.ref) + probe.expectMsgType[Bindings].services.map(_.path.name) should be(roles.map(_.name + "_" + i).toSet) + } + } + + enterBarrier("after-6") + } + + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala new file mode 100644 index 0000000000..250e02ae49 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala @@ -0,0 +1,214 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package sample.datareplication + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.LWWMap +import akka.cluster.ddata.Replicator.GetReplicaCount +import akka.cluster.ddata.Replicator.ReplicaCount +import akka.cluster.ddata.STMultiNodeSpec +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.LWWMapKey + +object ReplicatedShoppingCartSpec extends MultiNodeConfig { + val node1 = role("node-1") + val node2 = role("node-2") + val node3 = role("node-3") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + +} + +object ShoppingCart { + import akka.cluster.ddata.Replicator._ + + def props(userId: String): Props = Props(new ShoppingCart(userId)) + + case object GetCart + final case class AddItem(item: LineItem) + final case class RemoveItem(productId: String) + + final case class Cart(items: Set[LineItem]) + final case class LineItem(productId: String, title: String, quantity: Int) + + //#read-write-majority + private val timeout = 3.seconds + private val readMajority = ReadMajority(timeout) + private val writeMajority = WriteMajority(timeout) + //#read-write-majority + +} + +class ShoppingCart(userId: String) extends Actor { + import ShoppingCart._ + import akka.cluster.ddata.Replicator._ + + val replicator = DistributedData(context.system).replicator + implicit val cluster = Cluster(context.system) + + val DataKey = LWWMapKey[LineItem]("cart-" + userId) + + def receive = receiveGetCart + .orElse[Any, Unit](receiveAddItem) + .orElse[Any, Unit](receiveRemoveItem) + .orElse[Any, Unit](receiveOther) + + 
//#get-cart + def receiveGetCart: Receive = { + case GetCart ⇒ + replicator ! Get(DataKey, readMajority, Some(sender())) + + case g @ GetSuccess(DataKey, Some(replyTo: ActorRef)) ⇒ + val data = g.get(DataKey) + val cart = Cart(data.entries.values.toSet) + replyTo ! cart + + case NotFound(DataKey, Some(replyTo: ActorRef)) ⇒ + replyTo ! Cart(Set.empty) + + case GetFailure(DataKey, Some(replyTo: ActorRef)) ⇒ + // ReadMajority failure, try again with local read + replicator ! Get(DataKey, ReadLocal, Some(replyTo)) + } + //#get-cart + + //#add-item + def receiveAddItem: Receive = { + case cmd @ AddItem(item) ⇒ + val update = Update(DataKey, LWWMap.empty[LineItem], writeMajority, Some(cmd)) { + cart ⇒ updateCart(cart, item) + } + replicator ! update + + case GetFailure(DataKey, Some(AddItem(item))) ⇒ + // ReadMajority of Update failed, fall back to best effort local value + replicator ! Update(DataKey, LWWMap.empty[LineItem], writeMajority, None) { + cart ⇒ updateCart(cart, item) + } + } + //#add-item + + //#remove-item + def receiveRemoveItem: Receive = { + case cmd @ RemoveItem(productId) ⇒ + // Try to fetch latest from a majority of nodes first, since ORMap + // remove must have seen the item to be able to remove it. + replicator ! Get(DataKey, readMajority, Some(cmd)) + + case GetSuccess(DataKey, Some(RemoveItem(productId))) ⇒ + replicator ! Update(DataKey, LWWMap(), writeMajority, None) { + _ - productId + } + + case GetFailure(DataKey, Some(RemoveItem(productId))) ⇒ + // ReadMajority failed, fall back to best effort local value + replicator ! 
Update(DataKey, LWWMap(), writeMajority, None) { + _ - productId + } + + case NotFound(DataKey, Some(RemoveItem(productId))) ⇒ + // nothing to remove + } + //#remove-item + + def receiveOther: Receive = { + case _: UpdateSuccess[_] | _: UpdateTimeout[_] ⇒ + // UpdateTimeout, will eventually be replicated + case e: UpdateFailure[_] ⇒ throw new IllegalStateException("Unexpected failure: " + e) + } + + def updateCart(data: LWWMap[LineItem], item: LineItem): LWWMap[LineItem] = + data.get(item.productId) match { + case Some(LineItem(_, _, existingQuantity)) ⇒ + data + (item.productId -> item.copy(quantity = existingQuantity + item.quantity)) + case None ⇒ data + (item.productId -> item) + } + +} + +class ReplicatedShoppingCartSpecMultiJvmNode1 extends ReplicatedShoppingCartSpec +class ReplicatedShoppingCartSpecMultiJvmNode2 extends ReplicatedShoppingCartSpec +class ReplicatedShoppingCartSpecMultiJvmNode3 extends ReplicatedShoppingCartSpec + +class ReplicatedShoppingCartSpec extends MultiNodeSpec(ReplicatedShoppingCartSpec) with STMultiNodeSpec with ImplicitSender { + import ReplicatedShoppingCartSpec._ + import ShoppingCart._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + val shoppingCart = system.actorOf(ShoppingCart.props("user-1")) + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Demo of a replicated shopping cart" must { + "join cluster" in within(10.seconds) { + join(node1, node1) + join(node2, node1) + join(node3, node1) + + awaitAssert { + DistributedData(system).replicator ! GetReplicaCount + expectMsg(ReplicaCount(roles.size)) + } + enterBarrier("after-1") + } + + "handle updates directly after start" in within(15.seconds) { + runOn(node2) { + shoppingCart ! ShoppingCart.AddItem(LineItem("1", "Apples", quantity = 2)) + shoppingCart ! 
ShoppingCart.AddItem(LineItem("2", "Oranges", quantity = 3)) + } + enterBarrier("updates-done") + + awaitAssert { + shoppingCart ! ShoppingCart.GetCart + val cart = expectMsgType[Cart] + cart.items should be(Set(LineItem("1", "Apples", quantity = 2), LineItem("2", "Oranges", quantity = 3))) + } + + enterBarrier("after-2") + } + + "handle updates from different nodes" in within(5.seconds) { + runOn(node2) { + shoppingCart ! ShoppingCart.AddItem(LineItem("1", "Apples", quantity = 5)) + shoppingCart ! ShoppingCart.RemoveItem("2") + } + runOn(node3) { + shoppingCart ! ShoppingCart.AddItem(LineItem("3", "Bananas", quantity = 4)) + } + enterBarrier("updates-done") + + awaitAssert { + shoppingCart ! ShoppingCart.GetCart + val cart = expectMsgType[Cart] + cart.items should be(Set(LineItem("1", "Apples", quantity = 7), LineItem("3", "Bananas", quantity = 4))) + } + + enterBarrier("after-3") + } + + } + +} + diff --git a/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/VotingContestSpec.scala b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/VotingContestSpec.scala new file mode 100644 index 0000000000..28c5200a38 --- /dev/null +++ b/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/VotingContestSpec.scala @@ -0,0 +1,184 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package sample.datareplication + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.Flag +import akka.cluster.ddata.PNCounterMap +import akka.cluster.ddata.Replicator.GetReplicaCount +import akka.cluster.ddata.Replicator.ReplicaCount +import akka.cluster.ddata.STMultiNodeSpec +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.FlagKey +import akka.cluster.ddata.PNCounterMapKey + +object VotingContestSpec extends MultiNodeConfig { + val node1 = role("node-1") + val node2 = role("node-2") + val node3 = role("node-3") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.log-dead-letters-during-shutdown = off + """)) + +} + +object VotingService { + case object Open + case object OpenAck + case object Close + case object CloseAck + final case class Vote(participant: String) + case object GetVotes + final case class Votes(result: Map[String, BigInt], open: Boolean) + + private final case class GetVotesReq(replyTo: ActorRef) +} + +class VotingService extends Actor { + import akka.cluster.ddata.Replicator._ + import VotingService._ + + val replicator = DistributedData(context.system).replicator + implicit val cluster = Cluster(context.system) + val OpenedKey = FlagKey("contestOpened") + val ClosedKey = FlagKey("contestClosed") + val CountersKey = PNCounterMapKey("contestCounters") + + replicator ! Subscribe(OpenedKey, self) + + def receive = { + case Open ⇒ + replicator ! Update(OpenedKey, Flag(), WriteAll(5.seconds))(_.switchOn) + becomeOpen() + + case c @ Changed(OpenedKey) if c.get(OpenedKey).enabled ⇒ + becomeOpen() + + case GetVotes ⇒ + sender() ! 
Votes(Map.empty, open = false) + } + + def becomeOpen(): Unit = { + replicator ! Unsubscribe(OpenedKey, self) + replicator ! Subscribe(ClosedKey, self) + context.become(open orElse getVotes(open = true)) + } + + def open: Receive = { + case v @ Vote(participant) ⇒ + val update = Update(CountersKey, PNCounterMap(), WriteLocal, request = Some(v)) { + _.increment(participant, 1) + } + replicator ! update + + case _: UpdateSuccess[_] ⇒ + + case Close ⇒ + replicator ! Update(ClosedKey, Flag(), WriteAll(5.seconds))(_.switchOn) + context.become(getVotes(open = false)) + + case c @ Changed(ClosedKey) if c.get(ClosedKey).enabled ⇒ + context.become(getVotes(open = false)) + } + + def getVotes(open: Boolean): Receive = { + case GetVotes ⇒ + replicator ! Get(CountersKey, ReadAll(3.seconds), Some(GetVotesReq(sender()))) + + case g @ GetSuccess(CountersKey, Some(GetVotesReq(replyTo))) ⇒ + val data = g.get(CountersKey) + replyTo ! Votes(data.entries, open) + + case NotFound(CountersKey, Some(GetVotesReq(replyTo))) ⇒ + replyTo ! Votes(Map.empty, open) + + case _: GetFailure[_] ⇒ + + case _: UpdateSuccess[_] ⇒ + } + +} + +class VotingContestSpecMultiJvmNode1 extends VotingContestSpec +class VotingContestSpecMultiJvmNode2 extends VotingContestSpec +class VotingContestSpecMultiJvmNode3 extends VotingContestSpec + +class VotingContestSpec extends MultiNodeSpec(VotingContestSpec) with STMultiNodeSpec with ImplicitSender { + import VotingContestSpec._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + } + enterBarrier(from.name + "-joined") + } + + "Demo of a replicated voting" must { + + "join cluster" in within(10.seconds) { + join(node1, node1) + join(node2, node1) + join(node3, node1) + + awaitAssert { + DistributedData(system).replicator ! 
GetReplicaCount + expectMsg(ReplicaCount(roles.size)) + } + enterBarrier("after-1") + } + + "count votes correctly" in within(15.seconds) { + import VotingService._ + val votingService = system.actorOf(Props[VotingService], "votingService") + val N = 1000 + runOn(node1) { + votingService ! Open + for (n ← 1 to N) { + votingService ! Vote("#" + ((n % 20) + 1)) + } + } + runOn(node2, node3) { + // wait for it to open + val p = TestProbe() + awaitAssert { + votingService.tell(GetVotes, p.ref) + p.expectMsgPF(3.seconds) { case Votes(_, true) ⇒ true } + } + for (n ← 1 to N) { + votingService ! Vote("#" + ((n % 20) + 1)) + } + } + enterBarrier("voting-done") + runOn(node3) { + votingService ! Close + } + + val expected = (1 to 20).map(n ⇒ "#" + n -> BigInt(3L * N / 20)).toMap + awaitAssert { + votingService ! GetVotes + expectMsg(3.seconds, Votes(expected, false)) + } + + enterBarrier("after-2") + } + } + +} + diff --git a/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfReplicatedData.java b/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfReplicatedData.java new file mode 100644 index 0000000000..d2f34dce75 --- /dev/null +++ b/akka-distributed-data/src/test/java/akka/cluster/ddata/JavaImplOfReplicatedData.java @@ -0,0 +1,29 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata; + +import akka.cluster.UniqueAddress; + +public class JavaImplOfReplicatedData extends AbstractReplicatedData implements RemovedNodePruning { + + @Override + public JavaImplOfReplicatedData merge(ReplicatedData other) { + return this; + } + + @Override + public boolean needPruningFrom(UniqueAddress removedNode) { + return false; + } + + @Override + public JavaImplOfReplicatedData prune(UniqueAddress removedNode, UniqueAddress collapseInto) { + return this; + } + + @Override + public JavaImplOfReplicatedData pruningCleanup(UniqueAddress removedNode) { + return this; + } +} diff --git a/akka-distributed-data/src/test/resources/reference.conf b/akka-distributed-data/src/test/resources/reference.conf new file mode 100644 index 0000000000..1339f90319 --- /dev/null +++ b/akka-distributed-data/src/test/resources/reference.conf @@ -0,0 +1,2 @@ +akka.actor.serialize-messages = on +akka.actor.serialize-creators = on diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala new file mode 100644 index 0000000000..cf7d86be34 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala @@ -0,0 +1,45 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ + +package akka.cluster.ddata + +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class FlagSpec extends WordSpec with Matchers { + + "A Flag" must { + + "be able to switch on once" in { + val f1 = Flag() + val f2 = f1.switchOn + val f3 = f2.switchOn + f1.enabled should be(false) + f2.enabled should be(true) + f3.enabled should be(true) + } + + "merge by picking true" in { + val f1 = Flag() + val f2 = f1.switchOn + val m1 = f1 merge f2 + m1.enabled should be(true) + val m2 = f2 merge f1 + m2.enabled should be(true) + } + + "have unapply extractor" in { + val f1 = Flag.empty.switchOn + val Flag(value1) = f1 + val value2: Boolean = value1 + Changed(FlagKey("key"))(f1) match { + case c @ Changed(FlagKey("key")) ⇒ + val Flag(value3) = c.dataValue + val value4: Boolean = value3 + value4 should be(true) + } + } + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala new file mode 100644 index 0000000000..dd7bf99c15 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala @@ -0,0 +1,171 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ + +package akka.cluster.ddata + +import akka.actor.Address +import akka.cluster.UniqueAddress +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class GCounterSpec extends WordSpec with Matchers { + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + val node3 = UniqueAddress(node1.address.copy(port = Some(2553)), 3) + + "A GCounter" must { + + "be able to increment each node's record by one" in { + val c1 = GCounter() + + val c2 = c1 increment node1 + val c3 = c2 increment node1 + + val c4 = c3 increment node2 + val c5 = c4 increment node2 + val c6 = c5 increment node2 + + c6.state(node1) should be(2) + c6.state(node2) should be(3) + } + + "be able to increment each node's record by arbitrary delta" in { + val c1 = GCounter() + + val c2 = c1 increment (node1, 3) + val c3 = c2 increment (node1, 4) + + val c4 = c3 increment (node2, 2) + val c5 = c4 increment (node2, 7) + val c6 = c5 increment node2 + + c6.state(node1) should be(7) + c6.state(node2) should be(10) + } + + "be able to summarize the history to the correct aggregated value" in { + val c1 = GCounter() + + val c2 = c1 increment (node1, 3) + val c3 = c2 increment (node1, 4) + + val c4 = c3 increment (node2, 2) + val c5 = c4 increment (node2, 7) + val c6 = c5 increment node2 + + c6.state(node1) should be(7) + c6.state(node2) should be(10) + + c6.value should be(17) + } + + "be able to have its history correctly merged with another GCounter 1" in { + // counter 1 + val c11 = GCounter() + val c12 = c11 increment (node1, 3) + val c13 = c12 increment (node1, 4) + val c14 = c13 increment (node2, 2) + val c15 = c14 increment (node2, 7) + val c16 = c15 increment node2 + + c16.state(node1) should be(7) + c16.state(node2) should be(10) + c16.value should be(17) + + // counter 1 + val c21 = GCounter() + val c22 = c21 increment (node1, 2) + val c23 = c22 
increment (node1, 2) + val c24 = c23 increment (node2, 3) + val c25 = c24 increment (node2, 2) + val c26 = c25 increment node2 + + c26.state(node1) should be(4) + c26.state(node2) should be(6) + c26.value should be(10) + + // merge both ways + val merged1 = c16 merge c26 + merged1.state(node1) should be(7) + merged1.state(node2) should be(10) + merged1.value should be(17) + + val merged2 = c26 merge c16 + merged2.state(node1) should be(7) + merged2.state(node2) should be(10) + merged2.value should be(17) + } + + "be able to have its history correctly merged with another GCounter 2" in { + // counter 1 + val c11 = GCounter() + val c12 = c11 increment (node1, 2) + val c13 = c12 increment (node1, 2) + val c14 = c13 increment (node2, 2) + val c15 = c14 increment (node2, 7) + val c16 = c15 increment node2 + + c16.state(node1) should be(4) + c16.state(node2) should be(10) + c16.value should be(14) + + // counter 1 + val c21 = GCounter() + val c22 = c21 increment (node1, 3) + val c23 = c22 increment (node1, 4) + val c24 = c23 increment (node2, 3) + val c25 = c24 increment (node2, 2) + val c26 = c25 increment node2 + + c26.state(node1) should be(7) + c26.state(node2) should be(6) + c26.value should be(13) + + // merge both ways + val merged1 = c16 merge c26 + merged1.state(node1) should be(7) + merged1.state(node2) should be(10) + merged1.value should be(17) + + val merged2 = c26 merge c16 + merged2.state(node1) should be(7) + merged2.state(node2) should be(10) + merged2.value should be(17) + } + + "have support for pruning" in { + val c1 = GCounter() + val c2 = c1 increment node1 + val c3 = c2 increment node2 + c2.needPruningFrom(node1) should be(true) + c2.needPruningFrom(node2) should be(false) + c3.needPruningFrom(node1) should be(true) + c3.needPruningFrom(node2) should be(true) + c3.value should be(2) + + val c4 = c3.prune(node1, node2) + c4.needPruningFrom(node2) should be(true) + c4.needPruningFrom(node1) should be(false) + c4.value should be(2) + + val c5 = (c4 
increment node1).pruningCleanup(node1) + c5.needPruningFrom(node1) should be(false) + c4.value should be(2) + } + + "have unapply extractor" in { + val c1 = GCounter.empty.increment(node1).increment(node2) + val GCounter(value1) = c1 + val value2: BigInt = value1 + Changed(GCounterKey("key"))(c1) match { + case c @ Changed(GCounterKey("key")) ⇒ + val GCounter(value3) = c.dataValue + val value4: BigInt = value3 + value4 should be(2L) + } + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala new file mode 100644 index 0000000000..3f9c17bbd5 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala @@ -0,0 +1,119 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ + +package akka.cluster.ddata + +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class GSetSpec extends WordSpec with Matchers { + + val user1 = """{"username":"john","password":"coltrane"}""" + val user2 = """{"username":"sonny","password":"rollins"}""" + val user3 = """{"username":"charlie","password":"parker"}""" + val user4 = """{"username":"charles","password":"mingus"}""" + + "A GSet" must { + + "be able to add user" in { + val c1 = GSet.empty[String] + + val c2 = c1 + user1 + val c3 = c2 + user2 + + val c4 = c3 + user4 + val c5 = c4 + user3 + + c5.elements should contain(user1) + c5.elements should contain(user2) + c5.elements should contain(user3) + c5.elements should contain(user4) + } + + "be able to have its user set correctly merged with another GSet with unique user sets" in { + // set 1 + val c11 = GSet.empty[String] + + val c12 = c11 + user1 + val c13 = c12 + user2 + + c13.elements should contain(user1) + c13.elements should contain(user2) + + // set 2 + val c21 = GSet.empty[String] + + val c22 = c21 + user3 + val c23 = c22 + user4 + + c23.elements should contain(user3) + 
c23.elements should contain(user4) + + // merge both ways + val merged1 = c13 merge c23 + merged1.elements should contain(user1) + merged1.elements should contain(user2) + merged1.elements should contain(user3) + merged1.elements should contain(user4) + + val merged2 = c23 merge c13 + merged2.elements should contain(user1) + merged2.elements should contain(user2) + merged2.elements should contain(user3) + merged2.elements should contain(user4) + } + + "be able to have its user set correctly merged with another GSet with overlapping user sets" in { + // set 1 + val c10 = GSet.empty[String] + + val c11 = c10 + user1 + val c12 = c11 + user2 + val c13 = c12 + user3 + + c13.elements should contain(user1) + c13.elements should contain(user2) + c13.elements should contain(user3) + + // set 2 + val c20 = GSet.empty[String] + + val c21 = c20 + user2 + val c22 = c21 + user3 + val c23 = c22 + user4 + + c23.elements should contain(user2) + c23.elements should contain(user3) + c23.elements should contain(user4) + + // merge both ways + val merged1 = c13 merge c23 + merged1.elements should contain(user1) + merged1.elements should contain(user2) + merged1.elements should contain(user3) + merged1.elements should contain(user4) + + val merged2 = c23 merge c13 + merged2.elements should contain(user1) + merged2.elements should contain(user2) + merged2.elements should contain(user3) + merged2.elements should contain(user4) + } + + "have unapply extractor" in { + val s1 = GSet.empty + "a" + "b" + val s2: GSet[String] = s1 + val GSet(elements1) = s1 + val elements2: Set[String] = elements1 + Changed(GSetKey[String]("key"))(s1) match { + case c @ Changed(GSetKey("key")) ⇒ + val GSet(elements3) = c.dataValue + val elements4: Set[String] = elements3 + elements4 should be(Set("a", "b")) + } + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala new file mode 100644 index 
0000000000..6b857c9995
--- /dev/null
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala
@@ -0,0 +1,63 @@
/**
 * Copyright (C) 2009-2015 Typesafe Inc.
 */

package akka.cluster.ddata

import akka.actor.Address
import akka.cluster.UniqueAddress
import akka.cluster.ddata.Replicator.Changed
import org.scalatest.Matchers
import org.scalatest.WordSpec

/**
 * Unit tests for the last-writer-wins map CRDT: put/merge/remove
 * semantics and the unapply extractor.
 */
class LWWMapSpec extends WordSpec with Matchers {
  import LWWRegister.defaultClock

  val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1)
  val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2)

  "A LWWMap" must {

    "be able to set entries" in {
      val m = LWWMap.empty[Int].put(node1, "a", 1, defaultClock[Int]).put(node2, "b", 2, defaultClock[Int])
      m.entries should be(Map("a" -> 1, "b" -> 2))
    }

    "be able to have its entries correctly merged with another LWWMap with other entries" in {
      val m1 = LWWMap.empty.put(node1, "a", 1, defaultClock[Int]).put(node1, "b", 2, defaultClock[Int])
      val m2 = LWWMap.empty.put(node2, "c", 3, defaultClock[Int])

      // merge both ways — disjoint keys, so the result is symmetric
      val expected = Map("a" -> 1, "b" -> 2, "c" -> 3)
      (m1 merge m2).entries should be(expected)
      (m2 merge m1).entries should be(expected)
    }

    "be able to remove entry" in {
      val m1 = LWWMap.empty.put(node1, "a", 1, defaultClock[Int]).put(node1, "b", 2, defaultClock[Int])
      val m2 = LWWMap.empty.put(node2, "c", 3, defaultClock[Int])

      val merged1 = m1 merge m2

      val m3 = merged1.remove(node1, "b")
      (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 3))

      // but if there is a conflicting update the entry is not removed
      val m4 = merged1.put(node2, "b", 22, defaultClock[Int])
      (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3))
    }

    "have unapply extractor" in {
      val m1 = LWWMap.empty.put(node1, "a", 1L, defaultClock[Long])
      val LWWMap(entries1) = m1
      // compile-time check of the extracted entry type
      val entries2: Map[String, Long] = entries1
      Changed(LWWMapKey[Long]("key"))(m1) match {
        case c @ Changed(LWWMapKey("key")) ⇒
          val LWWMap(entries3) = c.dataValue
          val entries4: Map[String, Long] = entries3
          entries4 should be(Map("a" -> 1L))
      }
    }

  }
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala
new file mode 100644
index 0000000000..ac3e9f0fab
--- /dev/null
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala
@@ -0,0 +1,92 @@
/**
 * Copyright (C) 2009-2015 Typesafe Inc.
 */

package akka.cluster.ddata

import akka.actor.Address
import akka.cluster.UniqueAddress
import akka.cluster.ddata.Replicator.Changed
import org.scalatest.Matchers
import org.scalatest.WordSpec

/**
 * Unit tests for the last-writer-wins register CRDT: timestamp-based
 * merge, tie-breaking on node address, custom clocks and the
 * reverse clock that turns it into a first-write-wins register.
 */
class LWWRegisterSpec extends WordSpec with Matchers {
  import LWWRegister.defaultClock

  val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1)
  val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2)

  "A LWWRegister" must {
    "use latest of successive assignments" in {
      val r = (1 to 100).foldLeft(LWWRegister(node1, 0, defaultClock[Int])) {
        case (r, n) ⇒
          r.value should be(n - 1)
          r.withValue(node1, n, defaultClock[Int])
      }
      r.value should be(100)
    }

    "merge by picking max timestamp" in {
      // deterministic clock: each call returns the next value from 100
      val clock = new LWWRegister.Clock[String] {
        val i = Iterator.from(100)
        override def apply(current: Long, value: String): Long = i.next()
      }
      val r1 = LWWRegister(node1, "A", clock)
      r1.timestamp should be(100)
      val r2 = r1.withValue(node2, "B", clock)
      r2.timestamp should be(101)
      val m1 = r1 merge r2
      m1.value should be("B")
      m1.timestamp should be(101)
      val m2 = r2 merge r1
      m2.value should be("B")
      m2.timestamp should be(101)
    }

    "merge by picking least address when same timestamp" in {
      // constant clock forces a timestamp tie on every assignment
      val clock = new LWWRegister.Clock[String] {
        override def apply(current: Long, value: String): Long = 100
      }
      val r1 = LWWRegister(node1, "A", clock)
      val r2 = LWWRegister(node2, "B", clock)
      val m1 = r1 merge r2
      m1.value should be("A")
      val m2 = r2 merge r1
      m2.value should be("A")
    }

    "use monotonically increasing defaultClock" in {
      (1 to 100).foldLeft(LWWRegister(node1, 0, defaultClock)) {
        case (r, n) ⇒
          r.value should be(n - 1)
          val r2 = r.withValue(node1, n, defaultClock[Int])
          // each assignment must strictly advance the timestamp
          r2.timestamp should be > r.timestamp
          r2
      }
    }

    "have unapply extractor" in {
      val r1 = LWWRegister(node1, "a", defaultClock)
      val LWWRegister(value1) = r1
      // compile-time check of the extracted value type
      val value2: String = value1
      Changed(LWWRegisterKey[String]("key"))(r1) match {
        case c @ Changed(LWWRegisterKey("key")) ⇒
          val LWWRegister(value3) = c.dataValue
          val value4: String = value3
          value4 should be("a")
      }
    }

    "can be used as first-write-wins-register" in {
      import LWWRegister.reverseClock
      // with reverseClock the oldest write wins, so later values never stick
      val r = (1 to 100).foldLeft(LWWRegister(node1, 0, reverseClock[Int])) {
        case (r, n) ⇒
          r.value should be(0)
          val newRegister = r.merge(r.withValue(node1, n, reverseClock[Int]))
          newRegister should be(r)
          newRegister
      }
      r.value should be(0)
    }
  }
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
new file mode 100644
index 0000000000..dd54752e81
--- /dev/null
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
@@ -0,0 +1,81 @@
/**
 * Copyright (C) 2009-2015 Typesafe Inc.
+ */ + +package akka.cluster.ddata + +import scala.concurrent.duration._ + +import akka.actor.Actor +import akka.actor.ActorSystem +import akka.actor.Props +import akka.actor.Stash +import akka.cluster.Cluster +import akka.testkit.ImplicitSender +import akka.testkit.TestKit +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfterAll +import org.scalatest.Matchers +import org.scalatest.WordSpecLike + +object LocalConcurrencySpec { + + final case class Add(s: String) + + object Updater { + val key = ORSetKey[String]("key") + } + + class Updater extends Actor with Stash { + implicit val cluster = Cluster(context.system) + val replicator = DistributedData(context.system).replicator + + def receive = { + case s: String ⇒ + val update = Replicator.Update(Updater.key, ORSet.empty[String], Replicator.WriteLocal)(_ + s) + replicator ! update + } + } +} + +class LocalConcurrencySpec(_system: ActorSystem) extends TestKit(_system) + with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { + import LocalConcurrencySpec._ + + def this() { + this(ActorSystem("LocalConcurrencySpec", + ConfigFactory.parseString(""" + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.remote.netty.tcp.port=0 + """))) + } + + override def afterAll(): Unit = { + shutdown(system) + } + + val replicator = DistributedData(system).replicator + + "Updates from same node" must { + + "be possible to do from two actors" in { + val updater1 = system.actorOf(Props[Updater], "updater1") + val updater2 = system.actorOf(Props[Updater], "updater2") + + val numMessages = 100 + for (n ← 1 to numMessages) { + updater1 ! s"a$n" + updater2 ! s"b$n" + } + + val expected = ((1 to numMessages).map("a" + _) ++ (1 to numMessages).map("b" + _)).toSet + awaitAssert { + replicator ! 
Replicator.Get(Updater.key, Replicator.ReadLocal) + val ORSet(elements) = expectMsgType[Replicator.GetSuccess[_]].get(Updater.key) + elements should be(expected) + } + + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala new file mode 100644 index 0000000000..2a6b46ee93 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala @@ -0,0 +1,205 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ + +package akka.cluster.ddata + +import akka.actor.Address +import akka.cluster.UniqueAddress +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class ORMapSpec extends WordSpec with Matchers { + + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + + "A ORMap" must { + + "be able to add entries" in { + val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B") + val GSet(a) = m.entries("a") + a should be(Set("A")) + val GSet(b) = m.entries("b") + b should be(Set("B")) + + val m2 = m.put(node1, "a", GSet() + "C") + val GSet(a2) = m2.entries("a") + a2 should be(Set("C")) + + } + + "be able to remove entry" in { + val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B").remove(node1, "a") + m.entries.keySet should not contain ("a") + m.entries.keySet should contain("b") + } + + "be able to add removed" in { + val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B").remove(node1, "a") + m.entries.keySet should not contain ("a") + m.entries.keySet should contain("b") + val m2 = m.put(node1, "a", GSet() + "C") + m2.entries.keySet should contain("a") + m2.entries.keySet should contain("b") + } + + "be able to have its entries correctly merged with another ORMap with other entries" in { + val m1 = ORMap().put(node1, "a", 
GSet() + "A").put(node1, "b", GSet() + "B") + val m2 = ORMap().put(node2, "c", GSet() + "C") + + // merge both ways + val merged1 = m1 merge m2 + merged1.entries.keySet should contain("a") + merged1.entries.keySet should contain("b") + merged1.entries.keySet should contain("c") + + val merged2 = m2 merge m1 + merged2.entries.keySet should contain("a") + merged2.entries.keySet should contain("b") + merged2.entries.keySet should contain("c") + } + + "be able to have its entries correctly merged with another ORMap with overlapping entries" in { + val m1 = ORMap().put(node1, "a", GSet() + "A1").put(node1, "b", GSet() + "B1"). + remove(node1, "a").put(node1, "d", GSet() + "D1") + val m2 = ORMap().put(node2, "c", GSet() + "C2").put(node2, "a", GSet() + "A2"). + put(node2, "b", GSet() + "B2").remove(node2, "b").put(node2, "d", GSet() + "D2") + + // merge both ways + val merged1 = m1 merge m2 + merged1.entries.keySet should contain("a") + val GSet(a1) = merged1.entries("a") + a1 should be(Set("A2")) + merged1.entries.keySet should contain("b") + val GSet(b1) = merged1.entries("b") + b1 should be(Set("B1")) + merged1.entries.keySet should contain("c") + merged1.entries.keySet should contain("d") + val GSet(d1) = merged1.entries("d") + d1 should be(Set("D1", "D2")) + + val merged2 = m2 merge m1 + merged2.entries.keySet should contain("a") + val GSet(a2) = merged1.entries("a") + a2 should be(Set("A2")) + merged2.entries.keySet should contain("b") + val GSet(b2) = merged2.entries("b") + b2 should be(Set("B1")) + merged2.entries.keySet should contain("c") + merged2.entries.keySet should contain("d") + val GSet(d2) = merged2.entries("d") + d2 should be(Set("D1", "D2")) + } + + "illustrate the danger of using remove+put to replace an entry" in { + val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node1, "b", GSet.empty + "B") + val m2 = ORMap.empty.put(node2, "c", GSet.empty + "C") + + val merged1 = m1 merge m2 + + val m3 = merged1.remove(node1, "b").put(node1, "b", 
GSet.empty + "B2") + // same thing if only put is used + // val m3 = merged1.put(node1, "b", GSet() + "B2") + val merged2 = merged1 merge m3 + + merged2.entries("a").elements should be(Set("A")) + // note that B is included, because GSet("B") is merged with GSet("B2") + merged2.entries("b").elements should be(Set("B", "B2")) + merged2.entries("c").elements should be(Set("C")) + } + + "not allow put for ORSet elements type" in { + val m = ORMap().put(node1, "a", ORSet().add(node1, "A")) + + intercept[IllegalArgumentException] { + m.put(node1, "a", ORSet().add(node1, "B")) + } + } + + "be able to update entry" in { + val m1 = ORMap.empty[ORSet[String]].put(node1, "a", ORSet.empty.add(node1, "A")) + .put(node1, "b", ORSet.empty.add(node1, "B01").add(node1, "B02").add(node1, "B03")) + val m2 = ORMap.empty[ORSet[String]].put(node2, "c", ORSet.empty.add(node2, "C")) + + val merged1: ORMap[ORSet[String]] = m1 merge m2 + + val m3 = merged1.updated(node1, "b", ORSet.empty[String])(_.clear(node1).add(node1, "B2")) + + val merged2 = merged1 merge m3 + merged2.entries("a").elements should be(Set("A")) + merged2.entries("b").elements should be(Set("B2")) + merged2.entries("c").elements should be(Set("C")) + + val m4 = merged1.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B3")) + val merged3 = m3 merge m4 + merged3.entries("a").elements should be(Set("A")) + merged3.entries("b").elements should be(Set("B2", "B3")) + merged3.entries("c").elements should be(Set("C")) + } + + "be able to update ORSet entry with remove+put" in { + val m1 = ORMap.empty[ORSet[String]].put(node1, "a", ORSet.empty.add(node1, "A01")) + .updated(node1, "a", ORSet.empty[String])(_.add(node1, "A02")) + .updated(node1, "a", ORSet.empty[String])(_.add(node1, "A03")) + .put(node1, "b", ORSet.empty.add(node1, "B01").add(node1, "B02").add(node1, "B03")) + val m2 = ORMap.empty[ORSet[String]].put(node2, "c", ORSet.empty.add(node2, "C")) + + val merged1 = m1 merge m2 + + // note that remove + put work 
because the new VersionVector version is incremented + // from a global counter + val m3 = merged1.remove(node1, "b").put(node1, "b", ORSet.empty.add(node1, "B2")) + + val merged2 = merged1 merge m3 + merged2.entries("a").elements should be(Set("A01", "A02", "A03")) + merged2.entries("b").elements should be(Set("B2")) + merged2.entries("c").elements should be(Set("C")) + + val m4 = merged1.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B3")) + val merged3 = m3 merge m4 + merged3.entries("a").elements should be(Set("A01", "A02", "A03")) + merged3.entries("b").elements should be(Set("B2", "B3")) + merged3.entries("c").elements should be(Set("C")) + } + + "be able to update ORSet entry with remove -> merge -> put" in { + val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")) + .put(node1, "b", ORSet.empty.add(node1, "B01").add(node1, "B02").add(node1, "B03")) + val m2 = ORMap.empty.put(node2, "c", ORSet.empty.add(node2, "C")) + + val merged1 = m1 merge m2 + + val m3 = merged1.remove(node1, "b") + + val merged2 = merged1 merge m3 + merged2.entries("a").elements should be(Set("A")) + merged2.contains("b") should be(false) + merged2.entries("c").elements should be(Set("C")) + + val m4 = merged2.put(node1, "b", ORSet.empty.add(node1, "B2")) + val m5 = merged2.updated(node2, "c", ORSet.empty[String])(_.add(node2, "C2")) + .put(node2, "b", ORSet.empty.add(node2, "B3")) + + val merged3 = m5 merge m4 + merged3.entries("a").elements should be(Set("A")) + merged3.entries("b").elements should be(Set("B2", "B3")) + merged3.entries("c").elements should be(Set("C", "C2")) + } + + "have unapply extractor" in { + val m1 = ORMap.empty.put(node1, "a", Flag(true)).put(node2, "b", Flag(false)) + val m2: ORMap[Flag] = m1 + val ORMap(entries1) = m1 + val entries2: Map[String, Flag] = entries1 + Changed(ORMapKey[Flag]("key"))(m1) match { + case c @ Changed(ORMapKey("key")) ⇒ + val ORMap(entries3) = c.dataValue + val entries4: Map[String, ReplicatedData] = entries3 + 
entries4 should be(Map("a" -> Flag(true), "b" -> Flag(false))) + } + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala new file mode 100644 index 0000000000..30e189b304 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala @@ -0,0 +1,355 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ + +package akka.cluster.ddata + +import scala.collection.immutable.TreeMap + +import akka.actor.Address +import akka.cluster.UniqueAddress +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class ORSetSpec extends WordSpec with Matchers { + + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + + val nodeA = UniqueAddress(Address("akka.tcp", "Sys", "a", 2552), 1) + val nodeB = UniqueAddress(nodeA.address.copy(host = Some("b")), 2) + val nodeC = UniqueAddress(nodeA.address.copy(host = Some("c")), 3) + val nodeD = UniqueAddress(nodeA.address.copy(host = Some("d")), 4) + val nodeE = UniqueAddress(nodeA.address.copy(host = Some("e")), 5) + val nodeF = UniqueAddress(nodeA.address.copy(host = Some("f")), 6) + val nodeG = UniqueAddress(nodeA.address.copy(host = Some("g")), 7) + val nodeH = UniqueAddress(nodeA.address.copy(host = Some("h")), 8) + + val user1 = """{"username":"john","password":"coltrane"}""" + val user2 = """{"username":"sonny","password":"rollins"}""" + val user3 = """{"username":"charlie","password":"parker"}""" + val user4 = """{"username":"charles","password":"mingus"}""" + + "A ORSet" must { + + "be able to add user" in { + val c1 = ORSet() + + val c2 = c1.add(node1, user1) + val c3 = c2.add(node1, user2) + + val c4 = c3.add(node1, user4) + val c5 = c4.add(node1, user3) + + c5.elements should contain(user1) + c5.elements should contain(user2) + c5.elements 
should contain(user3) + c5.elements should contain(user4) + } + + "be able to remove added user" in { + val c1 = ORSet() + + val c2 = c1.add(node1, user1) + val c3 = c2.add(node1, user2) + + val c4 = c3.remove(node1, user2) + val c5 = c4.remove(node1, user1) + + c5.elements should not contain (user1) + c5.elements should not contain (user2) + } + + "be able to add removed" in { + val c1 = ORSet() + val c2 = c1.remove(node1, user1) + val c3 = c2.add(node1, user1) + c3.elements should contain(user1) + val c4 = c3.remove(node1, user1) + c4.elements should not contain (user1) + val c5 = c4.add(node1, user1) + c5.elements should contain(user1) + } + + "be able to remove and add several times" in { + val c1 = ORSet() + + val c2 = c1.add(node1, user1) + val c3 = c2.add(node1, user2) + val c4 = c3.remove(node1, user1) + c4.elements should not contain (user1) + c4.elements should contain(user2) + + val c5 = c4.add(node1, user1) + val c6 = c5.add(node1, user2) + c6.elements should contain(user1) + c6.elements should contain(user2) + + val c7 = c6.remove(node1, user1) + val c8 = c7.add(node1, user2) + val c9 = c8.remove(node1, user1) + c9.elements should not contain (user1) + c9.elements should contain(user2) + } + + "be able to have its user set correctly merged with another ORSet with unique user sets" in { + // set 1 + val c1 = ORSet().add(node1, user1).add(node1, user2) + c1.elements should contain(user1) + c1.elements should contain(user2) + + // set 2 + val c2 = ORSet().add(node2, user3).add(node2, user4).remove(node2, user3) + + c2.elements should not contain (user3) + c2.elements should contain(user4) + + // merge both ways + val merged1 = c1 merge c2 + merged1.elements should contain(user1) + merged1.elements should contain(user2) + merged1.elements should not contain (user3) + merged1.elements should contain(user4) + + val merged2 = c2 merge c1 + merged2.elements should contain(user1) + merged2.elements should contain(user2) + merged2.elements should not contain 
(user3) + merged2.elements should contain(user4) + } + + "be able to have its user set correctly merged with another ORSet with overlapping user sets" in { + // set 1 + val c1 = ORSet().add(node1, user1).add(node1, user2).add(node1, user3).remove(node1, user1).remove(node1, user3) + + c1.elements should not contain (user1) + c1.elements should contain(user2) + c1.elements should not contain (user3) + + // set 2 + val c2 = ORSet().add(node2, user1).add(node2, user2).add(node2, user3).add(node2, user4).remove(node2, user3) + + c2.elements should contain(user1) + c2.elements should contain(user2) + c2.elements should not contain (user3) + c2.elements should contain(user4) + + // merge both ways + val merged1 = c1 merge c2 + merged1.elements should contain(user1) + merged1.elements should contain(user2) + merged1.elements should not contain (user3) + merged1.elements should contain(user4) + + val merged2 = c2 merge c1 + merged2.elements should contain(user1) + merged2.elements should contain(user2) + merged2.elements should not contain (user3) + merged2.elements should contain(user4) + } + + "be able to have its user set correctly merged for concurrent updates" in { + val c1 = ORSet().add(node1, user1).add(node1, user2).add(node1, user3) + + c1.elements should contain(user1) + c1.elements should contain(user2) + c1.elements should contain(user3) + + val c2 = c1.add(node2, user1).remove(node2, user2).remove(node2, user3) + + c2.elements should contain(user1) + c2.elements should not contain (user2) + c2.elements should not contain (user3) + + // merge both ways + val merged1 = c1 merge c2 + merged1.elements should contain(user1) + merged1.elements should not contain (user2) + merged1.elements should not contain (user3) + + val merged2 = c2 merge c1 + merged2.elements should contain(user1) + merged2.elements should not contain (user2) + merged2.elements should not contain (user3) + + val c3 = c1.add(node1, user4).remove(node1, user3).add(node1, user2) + + // merge both 
ways + val merged3 = c2 merge c3 + merged3.elements should contain(user1) + merged3.elements should contain(user2) + merged3.elements should not contain (user3) + merged3.elements should contain(user4) + + val merged4 = c3 merge c2 + merged4.elements should contain(user1) + merged4.elements should contain(user2) + merged4.elements should not contain (user3) + merged4.elements should contain(user4) + } + + "be able to have its user set correctly merged after remove" in { + val c1 = ORSet().add(node1, user1).add(node1, user2) + val c2 = c1.remove(node2, user2) + + // merge both ways + val merged1 = c1 merge c2 + merged1.elements should contain(user1) + merged1.elements should not contain (user2) + + val merged2 = c2 merge c1 + merged2.elements should contain(user1) + merged2.elements should not contain (user2) + + val c3 = c1.add(node1, user3) + + // merge both ways + val merged3 = c3 merge c2 + merged3.elements should contain(user1) + merged3.elements should not contain (user2) + merged3.elements should contain(user3) + + val merged4 = c2 merge c3 + merged4.elements should contain(user1) + merged4.elements should not contain (user2) + merged4.elements should contain(user3) + } + + } + + "ORSet unit test" must { + "verify subtractDots" in { + val dot = new VersionVector(TreeMap(nodeA -> 3, nodeB -> 2, nodeD -> 14, nodeG -> 22)) + val vvector = new VersionVector(TreeMap(nodeA -> 4, nodeB -> 1, nodeC -> 1, nodeD -> 14, nodeE -> 5, nodeF -> 2)) + val expected = new VersionVector(TreeMap(nodeB -> 2, nodeG -> 22)) + ORSet.subtractDots(dot, vvector) should be(expected) + } + + "verify mergeCommonKeys" in { + val commonKeys: Set[String] = Set("K1", "K2") + val thisDot1 = new VersionVector(TreeMap(nodeA -> 3, nodeD -> 7)) + val thisDot2 = new VersionVector(TreeMap(nodeB -> 5, nodeC -> 2)) + val thisVvector = new VersionVector(TreeMap(nodeA -> 3, nodeB -> 5, nodeC -> 2, nodeD -> 7)) + val thisSet = new ORSet( + elementsMap = Map("K1" -> thisDot1, "K2" -> thisDot2), + vvector 
= thisVvector) + val thatDot1 = new VersionVector(TreeMap(nodeA -> 3)) + val thatDot2 = new VersionVector(TreeMap(nodeB -> 6)) + val thatVvector = new VersionVector(TreeMap(nodeA -> 3, nodeB -> 6, nodeC -> 1, nodeD -> 8)) + val thatSet = new ORSet( + elementsMap = Map("K1" -> thatDot1, "K2" -> thatDot2), + vvector = thatVvector) + + val expectedDots = Map( + "K1" -> new VersionVector(TreeMap(nodeA -> 3)), + "K2" -> new VersionVector(TreeMap(nodeB -> 6, nodeC -> 2))) + + ORSet.mergeCommonKeys(commonKeys, thisSet, thatSet) should be(expectedDots) + } + + "verify mergeDisjointKeys" in { + val keys: Set[Any] = Set("K3", "K4", "K5") + val elements: Map[Any, VersionVector] = Map( + "K3" -> new VersionVector(TreeMap(nodeA -> 4)), + "K4" -> new VersionVector(TreeMap(nodeA -> 3, nodeD -> 8)), + "K5" -> new VersionVector(TreeMap(nodeA -> 2))) + val vvector = new VersionVector(TreeMap(nodeA -> 3, nodeD -> 7)) + val acc: Map[Any, VersionVector] = Map("K1" -> new VersionVector(TreeMap(nodeA -> 3))) + val expectedDots = acc ++ Map( + "K3" -> new VersionVector(TreeMap(nodeA -> 4)), + "K4" -> new VersionVector(TreeMap(nodeD -> 8))) // "a" -> 3 removed, optimized to include only those unseen + + ORSet.mergeDisjointKeys(keys, elements, vvector, acc) should be(expectedDots) + } + + "verify disjoint merge" in { + val a1 = ORSet().add(node1, "bar") + val b1 = ORSet().add(node2, "baz") + val c = a1.merge(b1) + val a2 = a1.remove(node1, "bar") + val d = a2.merge(c) + d.elements should be(Set("baz")) + } + + "verify removed after merge" in { + // Add Z at node1 replica + val a = ORSet().add(node1, "Z") + // Replicate it to some node3, i.e. 
it has dot 'Z'->{node1 -> 1} + val c = a + // Remove Z at node1 replica + val a2 = a.remove(node1, "Z") + // Add Z at node2, a new replica + val b = ORSet().add(node2, "Z") + // Replicate b to node1, so now node1 has a Z, the one with a Dot of + // {node2 -> 1} and version vector of [{node1 -> 1}, {node2 -> 1}] + val a3 = b.merge(a2) + a3.elements should be(Set("Z")) + // Remove the 'Z' at node2 replica + val b2 = b.remove(node2, "Z") + // Both node3 (c) and node1 (a3) have a 'Z', but when they merge, there should be + // no 'Z' as node3 (c)'s has been removed by node1 and node1 (a3)'s has been removed by + // node2 + c.elements should be(Set("Z")) + a3.elements should be(Set("Z")) + b2.elements should be(Set()) + + a3.merge(c).merge(b2).elements should be(Set.empty) + a3.merge(b2).merge(c).elements should be(Set.empty) + c.merge(b2).merge(a3).elements should be(Set.empty) + c.merge(a3).merge(b2).elements should be(Set.empty) + b2.merge(c).merge(a3).elements should be(Set.empty) + b2.merge(a3).merge(c).elements should be(Set.empty) + } + + "verify removed after merge 2" in { + val a = ORSet().add(node1, "Z") + val b = ORSet().add(node2, "Z") + // replicate node3 + val c = a + val a2 = a.remove(node1, "Z") + // replicate b to node1, now node1 has node2's 'Z' + val a3 = a2.merge(b) + a3.elements should be(Set("Z")) + // Remove node2's 'Z' + val b2 = b.remove(node2, "Z") + // Replicate c to node2, now node2 has node1's old 'Z' + val b3 = b2.merge(c) + b3.elements should be(Set("Z")) + // Merge everything + a3.merge(c).merge(b3).elements should be(Set.empty) + a3.merge(b3).merge(c).elements should be(Set.empty) + c.merge(b3).merge(a3).elements should be(Set.empty) + c.merge(a3).merge(b3).elements should be(Set.empty) + b3.merge(c).merge(a3).elements should be(Set.empty) + b3.merge(a3).merge(c).elements should be(Set.empty) + } + + "have unapply extractor" in { + val s1 = ORSet.empty.add(node1, "a").add(node2, "b") + val s2: ORSet[String] = s1 + val ORSet(elements1) = 
s1 // `unapply[A](s: ORSet[A])` is used here + val elements2: Set[String] = elements1 + + Changed(ORSetKey[String]("key"))(s1) match { + case c @ Changed(ORSetKey("key")) ⇒ + val x: ORSet[String] = c.dataValue + val ORSet(elements3) = c.dataValue + val elements4: Set[String] = elements3 + elements4 should be(Set("a", "b")) + } + + val msg: Any = Changed(ORSetKey[String]("key"))(s1) + msg match { + case c @ Changed(ORSetKey("key")) ⇒ + val ORSet(elements3) = c.dataValue // `unapply(a: ReplicatedData)` is used here + // if `unapply(a: ReplicatedData)` isn't defined the next line doesn't compile: + // type mismatch; found : scala.collection.immutable.Set[A] where type A required: Set[Any] Note: A <: Any, + // but trait Set is invariant in type A. You may wish to investigate a wildcard type such as _ <: Any. (SLS 3.2.10) + val elements4: Set[Any] = elements3 + elements4 should be(Set("a", "b")) + } + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala new file mode 100644 index 0000000000..0990bae398 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala @@ -0,0 +1,62 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ + +package akka.cluster.ddata + +import akka.actor.Address +import akka.cluster.UniqueAddress +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class PNCounterMapSpec extends WordSpec with Matchers { + + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + + "A PNCounterMap" must { + + "be able to increment and decrement entries" in { + val m = PNCounterMap().increment(node1, "a", 2).increment(node1, "b", 3).decrement(node2, "a", 1) + m.entries should be(Map("a" -> 1, "b" -> 3)) + } + + "be able to have its entries correctly merged with another ORMap with other entries" in { + val m1 = PNCounterMap().increment(node1, "a", 1).increment(node1, "b", 3).increment(node1, "c", 2) + val m2 = PNCounterMap().increment(node2, "c", 5) + + // merge both ways + val expected = Map("a" -> 1, "b" -> 3, "c" -> 7) + (m1 merge m2).entries should be(expected) + (m2 merge m1).entries should be(expected) + } + + "be able to remove entry" in { + val m1 = PNCounterMap().increment(node1, "a", 1).increment(node1, "b", 3).increment(node1, "c", 2) + val m2 = PNCounterMap().increment(node2, "c", 5) + + val merged1 = m1 merge m2 + + val m3 = merged1.remove(node1, "b") + (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 7)) + + // but if there is a conflicting update the entry is not removed + val m4 = merged1.increment(node2, "b", 10) + (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7)) + } + + "have unapply extractor" in { + val m1 = PNCounterMap.empty.increment(node1, "a", 1).increment(node2, "b", 2) + val PNCounterMap(entries1) = m1 + val entries2: Map[String, BigInt] = entries1 + Changed(PNCounterMapKey("key"))(m1) match { + case c @ Changed(PNCounterMapKey("key")) ⇒ + val PNCounterMap(entries3) = c.dataValue + val entries4: Map[String, BigInt] = entries3 + entries4 should be(Map("a" -> 1L, "b" -> 2L)) + } + 
} + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala new file mode 100644 index 0000000000..3f1b83a4b9 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala @@ -0,0 +1,172 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ + +package akka.cluster.ddata + +import akka.actor.Address +import akka.cluster.UniqueAddress +import akka.cluster.ddata.Replicator.Changed +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class PNCounterSpec extends WordSpec with Matchers { + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + + "A PNCounter" must { + + "be able to increment each node's record by one" in { + val c1 = PNCounter() + + val c2 = c1 increment node1 + val c3 = c2 increment node1 + + val c4 = c3 increment node2 + val c5 = c4 increment node2 + val c6 = c5 increment node2 + + c6.increments.state(node1) should be(2) + c6.increments.state(node2) should be(3) + } + + "be able to decrement each node's record by one" in { + val c1 = PNCounter() + + val c2 = c1 decrement node1 + val c3 = c2 decrement node1 + + val c4 = c3 decrement node2 + val c5 = c4 decrement node2 + val c6 = c5 decrement node2 + + c6.decrements.state(node1) should be(2) + c6.decrements.state(node2) should be(3) + } + + "be able to increment each node's record by arbitrary delta" in { + val c1 = PNCounter() + + val c2 = c1 increment (node1, 3) + val c3 = c2 increment (node1, 4) + + val c4 = c3 increment (node2, 2) + val c5 = c4 increment (node2, 7) + val c6 = c5 increment node2 + + c6.increments.state(node1) should be(7) + c6.increments.state(node2) should be(10) + } + + "be able to decrement each node's record by arbitrary delta" in { + val c1 = PNCounter() + + val c2 = c1 decrement (node1, 3) + val c3 = c2 decrement (node1, 4) + 
+ val c4 = c3 decrement (node2, 2) + val c5 = c4 decrement (node2, 7) + val c6 = c5 decrement node2 + + c6.decrements.state(node1) should be(7) + c6.decrements.state(node2) should be(10) + } + + "be able to increment and decrement each node's record by arbitrary delta" in { + val c1 = PNCounter() + + val c2 = c1 increment (node1, 3) + val c3 = c2 decrement (node1, 2) + + val c4 = c3 increment (node2, 5) + val c5 = c4 decrement (node2, 2) + val c6 = c5 increment node2 + + c6.increments.value should be(9) + c6.decrements.value should be(4) + } + + "be able to summarize the history to the correct aggregated value of increments and decrements" in { + val c1 = PNCounter() + + val c2 = c1 increment (node1, 3) + val c3 = c2 decrement (node1, 2) + + val c4 = c3 increment (node2, 5) + val c5 = c4 decrement (node2, 2) + val c6 = c5 increment node2 + + c6.increments.value should be(9) + c6.decrements.value should be(4) + + c6.value should be(5) + } + + "be able to have its history correctly merged with another PNCounter" in { + // counter 1 + val c11 = PNCounter() + val c12 = c11 increment (node1, 3) + val c13 = c12 decrement (node1, 2) + val c14 = c13 increment (node2, 5) + val c15 = c14 decrement (node2, 2) + val c16 = c15 increment node2 + + c16.increments.value should be(9) + c16.decrements.value should be(4) + c16.value should be(5) + + // counter 2 + val c21 = PNCounter() + val c22 = c21 increment (node1, 2) + val c23 = c22 decrement (node1, 3) + val c24 = c23 increment (node2, 3) + val c25 = c24 decrement (node2, 2) + val c26 = c25 increment node2 + + c26.increments.value should be(6) + c26.decrements.value should be(5) + c26.value should be(1) + + // merge both ways + val merged1 = c16 merge c26 + merged1.increments.value should be(9) + merged1.decrements.value should be(5) + merged1.value should be(4) + + val merged2 = c26 merge c16 + merged2.increments.value should be(9) + merged2.decrements.value should be(5) + merged2.value should be(4) + } + + "have support for 
pruning" in { + val c1 = PNCounter() + val c2 = c1 increment node1 + val c3 = c2 decrement node2 + c2.needPruningFrom(node1) should be(true) + c2.needPruningFrom(node2) should be(false) + c3.needPruningFrom(node1) should be(true) + c3.needPruningFrom(node2) should be(true) + + val c4 = c3.prune(node1, node2) + c4.needPruningFrom(node2) should be(true) + c4.needPruningFrom(node1) should be(false) + + val c5 = (c4 increment node1).pruningCleanup(node1) + c5.needPruningFrom(node1) should be(false) + } + + "have unapply extractor" in { + val c1 = PNCounter.empty.increment(node1).increment(node1).decrement(node2) + val PNCounter(value1) = c1 + val value2: BigInt = value1 + Changed(PNCounterKey("key"))(c1) match { + case c @ Changed(PNCounterKey("key")) ⇒ + val PNCounter(value3) = c.dataValue + val value4: BigInt = value3 + value4 should be(1L) + } + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PruningStateSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PruningStateSpec.scala new file mode 100644 index 0000000000..ae60aecbf1 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PruningStateSpec.scala @@ -0,0 +1,46 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ + +package akka.cluster.ddata + +import akka.actor.Address +import akka.cluster.UniqueAddress +import org.scalatest.Matchers +import org.scalatest.WordSpec + +class PruningStateSpec extends WordSpec with Matchers { + import PruningState._ + + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + val node3 = UniqueAddress(node1.address.copy(port = Some(2553)), 3) + val node4 = UniqueAddress(node1.address.copy(port = Some(2554)), 4) + + "Pruning state" must { + + "merge phase correctly" in { + val p1 = PruningState(node1, PruningInitialized(Set.empty)) + val p2 = PruningState(node1, PruningPerformed) + p1.merge(p2).phase should be(PruningPerformed) + p2.merge(p1).phase should be(PruningPerformed) + } + + "merge owner correctly" in { + val p1 = PruningState(node1, PruningInitialized(Set.empty)) + val p2 = PruningState(node2, PruningInitialized(Set.empty)) + val expected = PruningState(node1, PruningInitialized(Set.empty)) + p1.merge(p2) should be(expected) + p2.merge(p1) should be(expected) + } + + "merge seen correctly" in { + val p1 = PruningState(node1, PruningInitialized(Set(node2.address))) + val p2 = PruningState(node1, PruningInitialized(Set(node4.address))) + val expected = PruningState(node1, PruningInitialized(Set(node2.address, node4.address))) + p1.merge(p2) should be(expected) + p2.merge(p1) should be(expected) + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala new file mode 100644 index 0000000000..0f83fef8c5 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala @@ -0,0 +1,249 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ + +package akka.cluster.ddata + +import akka.actor.ActorSystem +import akka.actor.Address +import akka.cluster.UniqueAddress +import akka.testkit.TestKit +import org.scalatest.BeforeAndAfterAll +import org.scalatest.Matchers +import org.scalatest.WordSpecLike + +class VersionVectorSpec extends TestKit(ActorSystem("VersionVectorSpec")) + with WordSpecLike with Matchers with BeforeAndAfterAll { + + val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) + val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) + val node3 = UniqueAddress(node1.address.copy(port = Some(2553)), 3) + val node4 = UniqueAddress(node1.address.copy(port = Some(2554)), 4) + + override def afterAll { + shutdown() + } + + "A VersionVector" must { + + "have zero versions when created" in { + val vv = VersionVector() + vv.versions should be(Map()) + } + + "not happen before itself" in { + val vv1 = VersionVector() + val vv2 = VersionVector() + + vv1 <> vv2 should be(false) + } + + "pass misc comparison test 1" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node2 + val vv4_1 = vv3_1 + node1 + + val vv1_2 = VersionVector() + val vv2_2 = vv1_2 + node1 + val vv3_2 = vv2_2 + node2 + val vv4_2 = vv3_2 + node1 + + vv4_1 <> vv4_2 should be(false) + } + + "pass misc comparison test 2" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node2 + val vv4_1 = vv3_1 + node1 + + val vv1_2 = VersionVector() + val vv2_2 = vv1_2 + node1 + val vv3_2 = vv2_2 + node2 + val vv4_2 = vv3_2 + node1 + val vv5_2 = vv4_2 + node3 + + vv4_1 < vv5_2 should be(true) + } + + "pass misc comparison test 3" in { + var vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + + val vv1_2 = VersionVector() + val vv2_2 = vv1_2 + node2 + + vv2_1 <> vv2_2 should be(true) + } + + "pass misc comparison test 4" in { + val vv1_3 = VersionVector() + val vv2_3 = vv1_3 + node1 + val vv3_3 = vv2_3 + node2 + val vv4_3 = vv3_3 + node1 + + val 
vv1_4 = VersionVector() + val vv2_4 = vv1_4 + node1 + val vv3_4 = vv2_4 + node1 + val vv4_4 = vv3_4 + node3 + + vv4_3 <> vv4_4 should be(true) + } + + "pass misc comparison test 5" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node2 + val vv3_1 = vv2_1 + node2 + + val vv1_2 = VersionVector() + val vv2_2 = vv1_2 + node1 + val vv3_2 = vv2_2 + node2 + val vv4_2 = vv3_2 + node2 + val vv5_2 = vv4_2 + node3 + + vv3_1 < vv5_2 should be(true) + vv5_2 > vv3_1 should be(true) + } + + "pass misc comparison test 6" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node2 + + val vv1_2 = VersionVector() + val vv2_2 = vv1_2 + node1 + val vv3_2 = vv2_2 + node1 + + vv3_1 <> vv3_2 should be(true) + vv3_2 <> vv3_1 should be(true) + } + + "pass misc comparison test 7" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node2 + val vv4_1 = vv3_1 + node2 + val vv5_1 = vv4_1 + node3 + + val vv1_2 = vv4_1 + val vv2_2 = vv1_2 + node2 + val vv3_2 = vv2_2 + node2 + + vv5_1 <> vv3_2 should be(true) + vv3_2 <> vv5_1 should be(true) + } + + "pass misc comparison test 8" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node3 + + val vv1_2 = vv3_1 + node2 + + val vv4_1 = vv3_1 + node3 + + vv4_1 <> vv1_2 should be(true) + vv1_2 <> vv4_1 should be(true) + } + + "correctly merge two version vectors" in { + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node2 + val vv4_1 = vv3_1 + node2 + val vv5_1 = vv4_1 + node3 + + val vv1_2 = vv4_1 + val vv2_2 = vv1_2 + node2 + val vv3_2 = vv2_2 + node2 + + val merged1 = vv3_2 merge vv5_1 + merged1.versions.size should be(3) + merged1.versions.contains(node1) should be(true) + merged1.versions.contains(node2) should be(true) + merged1.versions.contains(node3) should be(true) + + val merged2 = vv5_1 merge vv3_2 + merged2.versions.size should be(3) + merged2.versions.contains(node1) should be(true) + 
merged2.versions.contains(node2) should be(true) + merged2.versions.contains(node3) should be(true) + + vv3_2 < merged1 should be(true) + vv5_1 < merged1 should be(true) + + vv3_2 < merged2 should be(true) + vv5_1 < merged2 should be(true) + + merged1 == merged2 should be(true) + } + + "correctly merge two disjoint version vectors" in { + + val vv1_1 = VersionVector() + val vv2_1 = vv1_1 + node1 + val vv3_1 = vv2_1 + node2 + val vv4_1 = vv3_1 + node2 + val vv5_1 = vv4_1 + node3 + + val vv1_2 = VersionVector() + val vv2_2 = vv1_2 + node4 + val vv3_2 = vv2_2 + node4 + + val merged1 = vv3_2 merge vv5_1 + merged1.versions.size should be(4) + merged1.versions.contains(node1) should be(true) + merged1.versions.contains(node2) should be(true) + merged1.versions.contains(node3) should be(true) + merged1.versions.contains(node4) should be(true) + + val merged2 = vv5_1 merge vv3_2 + merged2.versions.size should be(4) + merged2.versions.contains(node1) should be(true) + merged2.versions.contains(node2) should be(true) + merged2.versions.contains(node3) should be(true) + merged2.versions.contains(node4) should be(true) + + vv3_2 < merged1 should be(true) + vv5_1 < merged1 should be(true) + + vv3_2 < merged2 should be(true) + vv5_1 < merged2 should be(true) + + merged1 == merged2 should be(true) + } + + "pass blank version vector incrementing" in { + val v1 = VersionVector() + val v2 = VersionVector() + + val vv1 = v1 + node1 + val vv2 = v2 + node2 + + (vv1 > v1) should be(true) + (vv2 > v2) should be(true) + + (vv1 > v2) should be(true) + (vv2 > v1) should be(true) + + (vv2 > vv1) should be(false) + (vv1 > vv2) should be(false) + } + + "pass merging behavior" in { + val a = VersionVector() + val b = VersionVector() + + val a1 = a + node1 + val b1 = b + node2 + + var a2 = a1 + node1 + var c = a2.merge(b1) + var c1 = c + node3 + + (c1 > a2) should be(true) + (c1 > b1) should be(true) + } + } +} diff --git 
a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala new file mode 100644 index 0000000000..f6b9b829fb --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala @@ -0,0 +1,173 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. + */ +package akka.cluster.ddata.protobuf + +import scala.concurrent.duration._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.Matchers +import org.scalatest.WordSpecLike +import akka.actor.ActorSystem +import akka.actor.Address +import akka.actor.ExtendedActorSystem +import akka.cluster.ddata.Flag +import akka.cluster.ddata.GCounter +import akka.cluster.ddata.GSet +import akka.cluster.ddata.LWWMap +import akka.cluster.ddata.LWWRegister +import akka.cluster.ddata.ORMap +import akka.cluster.ddata.ORSet +import akka.cluster.ddata.PNCounter +import akka.cluster.ddata.PNCounterMap +import akka.cluster.ddata.Replicator._ +import akka.cluster.ddata.Replicator.Internal._ +import akka.cluster.ddata.VersionVector +import akka.testkit.TestKit +import akka.cluster.UniqueAddress +import com.typesafe.config.ConfigFactory + +class ReplicatedDataSerializerSpec extends TestKit(ActorSystem("ReplicatedDataSerializerSpec", + ConfigFactory.parseString(""" + akka.actor.provider=akka.cluster.ClusterActorRefProvider + akka.remote.netty.tcp.port=0 + """))) with WordSpecLike with Matchers with BeforeAndAfterAll { + + val serializer = new ReplicatedDataSerializer(system.asInstanceOf[ExtendedActorSystem]) + + val address1 = UniqueAddress(Address("akka.tcp", system.name, "some.host.org", 4711), 1) + val address2 = UniqueAddress(Address("akka.tcp", system.name, "other.host.org", 4711), 2) + val address3 = UniqueAddress(Address("akka.tcp", system.name, "some.host.org", 4712), 3) + + override def afterAll { + shutdown() + } + + def 
checkSerialization(obj: AnyRef): Unit = { + val blob = serializer.toBinary(obj) + val ref = serializer.fromBinary(blob, serializer.manifest(obj)) + ref should be(obj) + } + + def checkSameContent(a: AnyRef, b: AnyRef): Unit = { + a should be(b) + val blobA = serializer.toBinary(a) + val blobB = serializer.toBinary(b) + blobA.toSeq should be(blobB.toSeq) + } + + "ReplicatedDataSerializer" must { + + "serialize GSet" in { + checkSerialization(GSet()) + checkSerialization(GSet() + "a") + checkSerialization(GSet() + "a" + "b") + + checkSerialization(GSet() + 1 + 2 + 3) + checkSerialization(GSet() + address1 + address2) + + checkSerialization(GSet() + 1L + "2" + 3 + address1) + + checkSameContent(GSet() + "a" + "b", GSet() + "a" + "b") + checkSameContent(GSet() + "a" + "b", GSet() + "b" + "a") + checkSameContent(GSet() + address1 + address2 + address3, GSet() + address2 + address1 + address3) + checkSameContent(GSet() + address1 + address2 + address3, GSet() + address3 + address2 + address1) + } + + "serialize ORSet" in { + checkSerialization(ORSet()) + checkSerialization(ORSet().add(address1, "a")) + checkSerialization(ORSet().add(address1, "a").add(address2, "a")) + checkSerialization(ORSet().add(address1, "a").remove(address2, "a")) + checkSerialization(ORSet().add(address1, "a").add(address2, "b").remove(address1, "a")) + checkSerialization(ORSet().add(address1, 1).add(address2, 2)) + checkSerialization(ORSet().add(address1, 1L).add(address2, 2L)) + checkSerialization(ORSet().add(address1, "a").add(address2, 2).add(address3, 3L).add(address3, address3)) + + val s1 = ORSet().add(address1, "a").add(address2, "b") + val s2 = ORSet().add(address2, "b").add(address1, "a") + checkSameContent(s1.merge(s2), s2.merge(s1)) + + val s3 = ORSet().add(address1, "a").add(address2, 17).remove(address3, 17) + val s4 = ORSet().add(address2, 17).remove(address3, 17).add(address1, "a") + checkSameContent(s3.merge(s4), s4.merge(s3)) + } + + "serialize Flag" in { + 
checkSerialization(Flag()) + checkSerialization(Flag().switchOn) + } + + "serialize LWWRegister" in { + checkSerialization(LWWRegister(address1, "value1", LWWRegister.defaultClock)) + checkSerialization(LWWRegister(address1, "value2", LWWRegister.defaultClock[String]) + .withValue(address2, "value3", LWWRegister.defaultClock[String])) + } + + "serialize GCounter" in { + checkSerialization(GCounter()) + checkSerialization(GCounter().increment(address1, 3)) + checkSerialization(GCounter().increment(address1, 2).increment(address2, 5)) + + checkSameContent( + GCounter().increment(address1, 2).increment(address2, 5), + GCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1)) + checkSameContent( + GCounter().increment(address1, 2).increment(address3, 5), + GCounter().increment(address3, 5).increment(address1, 2)) + } + + "serialize PNCounter" in { + checkSerialization(PNCounter()) + checkSerialization(PNCounter().increment(address1, 3)) + checkSerialization(PNCounter().increment(address1, 3).decrement(address1, 1)) + checkSerialization(PNCounter().increment(address1, 2).increment(address2, 5)) + checkSerialization(PNCounter().increment(address1, 2).increment(address2, 5).decrement(address1, 1)) + + checkSameContent( + PNCounter().increment(address1, 2).increment(address2, 5), + PNCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1)) + checkSameContent( + PNCounter().increment(address1, 2).increment(address3, 5), + PNCounter().increment(address3, 5).increment(address1, 2)) + checkSameContent( + PNCounter().increment(address1, 2).decrement(address1, 1).increment(address3, 5), + PNCounter().increment(address3, 5).increment(address1, 2).decrement(address1, 1)) + } + + "serialize ORMap" in { + checkSerialization(ORMap()) + checkSerialization(ORMap().put(address1, "a", GSet() + "A")) + checkSerialization(ORMap().put(address1, "a", GSet() + "A").put(address2, "b", GSet() + "B")) + } + + "serialize LWWMap" in { + 
checkSerialization(LWWMap()) + checkSerialization(LWWMap().put(address1, "a", "value1", LWWRegister.defaultClock[Any])) + checkSerialization(LWWMap().put(address1, "a", "value1", LWWRegister.defaultClock[Any]) + .put(address2, "b", 17, LWWRegister.defaultClock[Any])) + } + + "serialize PNCounterMap" in { + checkSerialization(PNCounterMap()) + checkSerialization(PNCounterMap().increment(address1, "a", 3)) + checkSerialization(PNCounterMap().increment(address1, "a", 3).decrement(address2, "a", 2). + increment(address2, "b", 5)) + } + + "serialize DeletedData" in { + checkSerialization(DeletedData) + } + + "serialize VersionVector" in { + checkSerialization(VersionVector()) + checkSerialization(VersionVector().increment(address1)) + checkSerialization(VersionVector().increment(address1).increment(address2)) + + val v1 = VersionVector().increment(address1).increment(address1) + val v2 = VersionVector().increment(address2) + checkSameContent(v1.merge(v2), v2.merge(v1)) + } + + } +} + diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala new file mode 100644 index 0000000000..11cc68126a --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala @@ -0,0 +1,81 @@ +/** + * Copyright (C) 2009-2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata.protobuf + +import scala.concurrent.duration._ +import org.scalatest.BeforeAndAfterAll +import org.scalatest.Matchers +import org.scalatest.WordSpecLike +import akka.actor.ActorSystem +import akka.actor.Address +import akka.actor.ExtendedActorSystem +import akka.actor.Props +import akka.cluster.ddata.GSet +import akka.cluster.ddata.GSetKey +import akka.cluster.ddata.PruningState +import akka.cluster.ddata.PruningState.PruningInitialized +import akka.cluster.ddata.PruningState.PruningPerformed +import akka.cluster.ddata.Replicator._ +import akka.cluster.ddata.Replicator.Internal._ +import akka.testkit.TestKit +import akka.util.ByteString +import akka.cluster.UniqueAddress +import com.typesafe.config.ConfigFactory + +class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMessageSerializerSpec", + ConfigFactory.parseString(""" + akka.actor.provider=akka.cluster.ClusterActorRefProvider + akka.remote.netty.tcp.port=0 + """))) with WordSpecLike with Matchers with BeforeAndAfterAll { + + val serializer = new ReplicatorMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) + + val address1 = UniqueAddress(Address("akka.tcp", system.name, "some.host.org", 4711), 1) + val address2 = UniqueAddress(Address("akka.tcp", system.name, "other.host.org", 4711), 2) + val address3 = UniqueAddress(Address("akka.tcp", system.name, "some.host.org", 4712), 3) + + val keyA = GSetKey[String]("A") + + override def afterAll { + shutdown() + } + + def checkSerialization(obj: AnyRef): Unit = { + val blob = serializer.toBinary(obj) + val ref = serializer.fromBinary(blob, serializer.manifest(obj)) + ref should be(obj) + } + + "ReplicatorMessageSerializer" must { + + "serialize Replicator messages" in { + val ref1 = system.actorOf(Props.empty, "ref1") + val data1 = GSet.empty[String] + "a" + + checkSerialization(Get(keyA, ReadLocal)) + checkSerialization(Get(keyA, ReadMajority(2.seconds), Some("x"))) + checkSerialization(GetSuccess(keyA, 
None)(data1)) + checkSerialization(GetSuccess(keyA, Some("x"))(data1)) + checkSerialization(NotFound(keyA, Some("x"))) + checkSerialization(GetFailure(keyA, Some("x"))) + checkSerialization(Subscribe(keyA, ref1)) + checkSerialization(Unsubscribe(keyA, ref1)) + checkSerialization(Changed(keyA)(data1)) + checkSerialization(DataEnvelope(data1)) + checkSerialization(DataEnvelope(data1, pruning = Map( + address1 -> PruningState(address2, PruningPerformed), + address3 -> PruningState(address2, PruningInitialized(Set(address1.address)))))) + checkSerialization(Write("A", DataEnvelope(data1))) + checkSerialization(WriteAck) + checkSerialization(Read("A")) + checkSerialization(ReadResult(Some(DataEnvelope(data1)))) + checkSerialization(ReadResult(None)) + checkSerialization(Status(Map("A" -> ByteString.fromString("a"), + "B" -> ByteString.fromString("b")), chunk = 3, totChunks = 10)) + checkSerialization(Gossip(Map("A" -> DataEnvelope(data1), + "B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true)) + } + + } +} diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/DataBot.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/DataBot.scala new file mode 100644 index 0000000000..22deb14cc9 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/DataBot.scala @@ -0,0 +1,98 @@ +/** + * Copyright (C) 2015 Typesafe Inc. 
+ */ +package akka.cluster.ddata.sample + +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorSystem +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.ORSet +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.Replicator +import akka.cluster.ddata.ORSetKey + +object DataBot { + + def main(args: Array[String]): Unit = { + if (args.isEmpty) + startup(Seq("2551", "2552", "0")) + else + startup(args) + } + + def startup(ports: Seq[String]): Unit = { + ports.foreach { port ⇒ + // Override the configuration of the port + val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). + withFallback(ConfigFactory.load( + ConfigFactory.parseString(""" + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.remote { + netty.tcp { + hostname = "127.0.0.1" + port = 0 + } + } + + akka.cluster { + seed-nodes = [ + "akka.tcp://ClusterSystem@127.0.0.1:2551", + "akka.tcp://ClusterSystem@127.0.0.1:2552"] + + auto-down-unreachable-after = 10s + } + """))) + + // Create an Akka system + val system = ActorSystem("ClusterSystem", config) + // Create an actor that handles cluster domain events + system.actorOf(Props[DataBot], name = "dataBot") + } + } + + private case object Tick + +} + +class DataBot extends Actor with ActorLogging { + import DataBot._ + import Replicator._ + + val replicator = DistributedData(context.system).replicator + implicit val node = Cluster(context.system) + + import context.dispatcher + val tickTask = context.system.scheduler.schedule(5.seconds, 5.seconds, self, Tick) + + val DataKey = ORSetKey[String]("key") + + replicator ! 
Subscribe(DataKey, self) + + def receive = { + case Tick ⇒ + val s = ThreadLocalRandom.current().nextInt(97, 123).toChar.toString + if (ThreadLocalRandom.current().nextBoolean()) { + // add + log.info("Adding: {}", s) + replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ + s) + } else { + // remove + log.info("Removing: {}", s) + replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ - s) + } + + case _: UpdateResponse[_] ⇒ // ignore + + case c @ Changed(DataKey) ⇒ + log.info("Current elements: {}", c.get(DataKey).elements) + } + + override def postStop(): Unit = tickTask.cancel() + +} + diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/LotsOfDataBot.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/LotsOfDataBot.scala new file mode 100644 index 0000000000..1e11941473 --- /dev/null +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/sample/LotsOfDataBot.scala @@ -0,0 +1,137 @@ +/** + * Copyright (C) 2015 Typesafe Inc. + */ +package akka.cluster.ddata.sample + +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorSystem +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ddata.DistributedData +import akka.cluster.ddata.ORSet +import com.typesafe.config.ConfigFactory +import akka.cluster.ddata.Replicator +import akka.cluster.ddata.ORSetKey + +object LotsOfDataBot { + + def main(args: Array[String]): Unit = { + if (args.isEmpty) + startup(Seq("2551", "2552", "0")) + else + startup(args) + } + + def startup(ports: Seq[String]): Unit = { + ports.foreach { port ⇒ + // Override the configuration of the port + val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). 
+ withFallback(ConfigFactory.load( + ConfigFactory.parseString(""" + passive = off + max-entries = 100000 + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.remote { + netty.tcp { + hostname = "127.0.0.1" + port = 0 + } + } + + akka.cluster { + seed-nodes = [ + "akka.tcp://ClusterSystem@127.0.0.1:2551", + "akka.tcp://ClusterSystem@127.0.0.1:2552"] + + auto-down-unreachable-after = 10s + } + akka.cluster.distributed-data.use-offheap-memory = off + akka.remote.log-frame-size-exceeding = 10000b + """))) + + // Create an Akka system + val system = ActorSystem("ClusterSystem", config) + // Create an actor that handles cluster domain events + system.actorOf(Props[LotsOfDataBot], name = "dataBot") + } + } + + private case object Tick + +} + +class LotsOfDataBot extends Actor with ActorLogging { + import LotsOfDataBot._ + import Replicator._ + + val replicator = DistributedData(context.system).replicator + implicit val cluster = Cluster(context.system) + + import context.dispatcher + val isPassive = context.system.settings.config.getBoolean("passive") + var tickTask = + if (isPassive) + context.system.scheduler.schedule(1.seconds, 1.seconds, self, Tick) + else + context.system.scheduler.schedule(20.millis, 20.millis, self, Tick) + + val startTime = System.nanoTime() + var count = 1L + val maxEntries = context.system.settings.config.getInt("max-entries") + + def receive = if (isPassive) passive else active + + def active: Receive = { + case Tick ⇒ + val loop = if (count >= maxEntries) 1 else 100 + for (_ ← 1 to loop) { + count += 1 + if (count % 10000 == 0) + log.info("Reached {} entries", count) + if (count == maxEntries) { + log.info("Reached {} entries", count) + tickTask.cancel() + tickTask = context.system.scheduler.schedule(1.seconds, 1.seconds, self, Tick) + } + val key = ORSetKey[String]((count % maxEntries).toString) + if (count <= 100) + replicator ! 
Subscribe(key, self) + val s = ThreadLocalRandom.current().nextInt(97, 123).toChar.toString + if (count <= maxEntries || ThreadLocalRandom.current().nextBoolean()) { + // add + replicator ! Update(key, ORSet(), WriteLocal)(_ + s) + } else { + // remove + replicator ! Update(key, ORSet(), WriteLocal)(_ - s) + } + } + + case _: UpdateResponse[_] ⇒ // ignore + + case c @ Changed(ORSetKey(id)) ⇒ + val ORSet(elements) = c.dataValue + log.info("Current elements: {} -> {}", id, elements) + } + + def passive: Receive = { + case Tick ⇒ + if (!tickTask.isCancelled) + replicator ! GetKeyIds + case GetKeyIdsResult(keys) ⇒ + if (keys.size >= maxEntries) { + tickTask.cancel() + val duration = (System.nanoTime() - startTime).nanos.toMillis + log.info("It took {} ms to replicate {} entries", duration, keys.size) + } + case c @ Changed(ORSetKey(id)) ⇒ + val ORSet(elements) = c.dataValue + log.info("Current elements: {} -> {}", id, elements) + } + + override def postStop(): Unit = tickTask.cancel() + +} + diff --git a/akka-docs/rst/general/configuration.rst b/akka-docs/rst/general/configuration.rst index e86f45e40c..9a75215061 100644 --- a/akka-docs/rst/general/configuration.rst +++ b/akka-docs/rst/general/configuration.rst @@ -484,3 +484,35 @@ akka-testkit .. literalinclude:: ../../../akka-testkit/src/main/resources/reference.conf :language: none +.. _config-cluster-metrics: + +akka-cluster-metrics +~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: ../../../akka-cluster-metrics/src/main/resources/reference.conf + :language: none + +.. _config-cluster-tools: + +akka-cluster-tools +~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: ../../../akka-cluster-tools/src/main/resources/reference.conf + :language: none + +.. _config-cluster-sharding: + +akka-cluster-sharding +~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: ../../../akka-cluster-sharding/src/main/resources/reference.conf + :language: none + +.. _config-distributed-data: + +akka-distributed-data +~~~~~~~~~~~~~~~~~~~~~ + +.. 
literalinclude:: ../../../akka-distributed-data/src/main/resources/reference.conf + :language: none + diff --git a/akka-docs/rst/java/code/docs/ddata/DataBot.java b/akka-docs/rst/java/code/docs/ddata/DataBot.java new file mode 100644 index 0000000000..df95ceaaf0 --- /dev/null +++ b/akka-docs/rst/java/code/docs/ddata/DataBot.java @@ -0,0 +1,95 @@ +/** + * Copyright (C) 2015 Typesafe Inc. + */ +package docs.ddata; + +//#data-bot +import static java.util.concurrent.TimeUnit.SECONDS; + +import scala.concurrent.duration.Duration; +import scala.concurrent.forkjoin.ThreadLocalRandom; + +import akka.actor.AbstractActor; +import akka.actor.ActorRef; +import akka.actor.Cancellable; +import akka.cluster.Cluster; +import akka.cluster.ddata.DistributedData; +import akka.cluster.ddata.Key; +import akka.cluster.ddata.ORSet; +import akka.cluster.ddata.ORSetKey; +import akka.cluster.ddata.Replicator; +import akka.cluster.ddata.Replicator.Changed; +import akka.cluster.ddata.Replicator.Subscribe; +import akka.cluster.ddata.Replicator.Update; +import akka.cluster.ddata.Replicator.UpdateResponse; +import akka.event.Logging; +import akka.event.LoggingAdapter; +import akka.japi.pf.ReceiveBuilder; + +public class DataBot extends AbstractActor { + + private static final String TICK = "tick"; + + private final LoggingAdapter log = Logging.getLogger(context().system(), this); + + private final ActorRef replicator = + DistributedData.get(context().system()).replicator(); + private final Cluster node = Cluster.get(context().system()); + + private final Cancellable tickTask = context().system().scheduler().schedule( + Duration.create(5, SECONDS), Duration.create(5, SECONDS), self(), TICK, + context().dispatcher(), self()); + + private final Key> dataKey = ORSetKey.create("key"); + + public DataBot() { + receive(ReceiveBuilder. 
+ match(String.class, a -> a.equals(TICK), a -> { + String s = String.valueOf((char) ThreadLocalRandom.current().nextInt(97, 123)); + if (ThreadLocalRandom.current().nextBoolean()) { + // add + log.info("Adding: {}", s); + Update> update = new Update<>( + dataKey, + ORSet.create(), + Replicator.writeLocal(), + curr -> curr.add(node, s)); + replicator.tell(update, self()); + } else { + // remove + log.info("Removing: {}", s); + Update> update = new Update<>( + dataKey, + ORSet.create(), + Replicator.writeLocal(), + curr -> curr.remove(node, s)); + replicator.tell(update, self()); + } + }). + match(UpdateResponse.class, r -> { + // ignore + }). + match(Changed.class, c -> c.key().equals(dataKey), c -> { + @SuppressWarnings("unchecked") + Changed> c2 = c; + ORSet data = c2.dataValue(); + log.info("Current elements: {}", data.getElements()); + }). + matchAny(o -> log.info("received unknown message")).build() + ); + } + + + @Override + public void preStart() { + Subscribe> subscribe = new Subscribe<>(dataKey, self()); + replicator.tell(subscribe, ActorRef.noSender()); + } + + @Override + public void postStop(){ + tickTask.cancel(); + } + +} +//#data-bot diff --git a/akka-docs/rst/java/code/docs/ddata/DistributedDataDocTest.java b/akka-docs/rst/java/code/docs/ddata/DistributedDataDocTest.java new file mode 100644 index 0000000000..2521405d06 --- /dev/null +++ b/akka-docs/rst/java/code/docs/ddata/DistributedDataDocTest.java @@ -0,0 +1,411 @@ +/** + * Copyright (C) 2015 Typesafe Inc. 
+ */ +package docs.ddata; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.Assert.assertEquals; + +import com.typesafe.config.ConfigFactory; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import scala.PartialFunction; +import scala.concurrent.duration.Duration; +import scala.runtime.BoxedUnit; + +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.cluster.Cluster; +import akka.cluster.ddata.DistributedData; +import akka.cluster.ddata.Flag; +import akka.cluster.ddata.FlagKey; +import akka.cluster.ddata.GSet; +import akka.cluster.ddata.GSetKey; +import akka.cluster.ddata.Key; +import akka.cluster.ddata.LWWRegister; +import akka.cluster.ddata.ORSet; +import akka.cluster.ddata.ORSetKey; +import akka.cluster.ddata.PNCounter; +import akka.cluster.ddata.PNCounterKey; +import akka.cluster.ddata.PNCounterMap; +import akka.cluster.ddata.Replicator; +import akka.cluster.ddata.Replicator.Changed; +import akka.cluster.ddata.Replicator.Delete; +import akka.cluster.ddata.Replicator.GetFailure; +import akka.cluster.ddata.Replicator.GetSuccess; +import akka.cluster.ddata.Replicator.NotFound; +import akka.cluster.ddata.Replicator.ReadAll; +import akka.cluster.ddata.Replicator.ReadConsistency; +import akka.cluster.ddata.Replicator.ReadFrom; +import akka.cluster.ddata.Replicator.ReadMajority; +import akka.cluster.ddata.Replicator.Subscribe; +import akka.cluster.ddata.Replicator.UpdateSuccess; +import akka.cluster.ddata.Replicator.UpdateTimeout; +import akka.cluster.ddata.Replicator.WriteAll; +import akka.cluster.ddata.Replicator.WriteConsistency; +import akka.cluster.ddata.Replicator.WriteMajority; +import akka.cluster.ddata.Replicator.WriteTo; +import akka.japi.pf.ReceiveBuilder; +import akka.testkit.JavaTestKit; + +public class DistributedDataDocTest { + + 
static ActorSystem system; + + void receive(PartialFunction pf) { + } + + JavaTestKit probe = new JavaTestKit(system); + + ActorRef self() { + return probe.getRef(); + } + + ActorRef sender() { + return probe.getRef(); + } + + @BeforeClass + public static void setup() { + system = ActorSystem.create("DistributedDataDocTest", + ConfigFactory.parseString(DistributedDataDocSpec.config())); + } + + @AfterClass + public static void tearDown() { + JavaTestKit.shutdownActorSystem(system); + system = null; + } + + @Test + public void demonstrateUpdate() { + probe = new JavaTestKit(system); + + //#update + final Cluster node = Cluster.get(system); + final ActorRef replicator = DistributedData.get(system).replicator(); + + final Key counter1Key = PNCounterKey.create("counter1"); + final Key> set1Key = GSetKey.create("set1"); + final Key> set2Key = ORSetKey.create("set2"); + final Key activeFlagKey = FlagKey.create("active"); + + replicator.tell(new Replicator.Update(counter1Key, PNCounter.create(), + Replicator.writeLocal(), curr -> curr.increment(node, 1)), self()); + + final WriteConsistency writeTo3 = new WriteTo(3, Duration.create(1, SECONDS)); + replicator.tell(new Replicator.Update>(set1Key, GSet.create(), + writeTo3, curr -> curr.add("hello")), self()); + + final WriteConsistency writeMajority = + new WriteMajority(Duration.create(5, SECONDS)); + replicator.tell(new Replicator.Update>(set2Key, ORSet.create(), + writeMajority, curr -> curr.add(node, "hello")), self()); + + final WriteConsistency writeAll = new WriteAll(Duration.create(5, SECONDS)); + replicator.tell(new Replicator.Update(activeFlagKey, Flag.create(), + writeAll, curr -> curr.switchOn()), self()); + //#update + + probe.expectMsgClass(UpdateSuccess.class); + //#update-response1 + receive(ReceiveBuilder. + match(UpdateSuccess.class, a -> a.key().equals(counter1Key), a -> { + // ok + }).build()); + //#update-response1 + + //#update-response2 + receive(ReceiveBuilder. 
+ match(UpdateSuccess.class, a -> a.key().equals(set1Key), a -> { + // ok + }). + match(UpdateTimeout.class, a -> a.key().equals(set1Key), a -> { + // write to 3 nodes failed within 1.second + }).build()); + //#update-response2 + } + + @Test + public void demonstrateUpdateWithRequestContext() { + probe = new JavaTestKit(system); + + //#update-request-context + final Cluster node = Cluster.get(system); + final ActorRef replicator = DistributedData.get(system).replicator(); + + final WriteConsistency writeTwo = new WriteTo(2, Duration.create(3, SECONDS)); + final Key counter1Key = PNCounterKey.create("counter1"); + + receive(ReceiveBuilder. + match(String.class, a -> a.equals("increment"), a -> { + // incoming command to increase the counter + Optional reqContext = Optional.of(sender()); + Replicator.Update upd = new Replicator.Update(counter1Key, + PNCounter.create(), writeTwo, reqContext, curr -> curr.increment(node, 1)); + replicator.tell(upd, self()); + }). + + match(UpdateSuccess.class, a -> a.key().equals(counter1Key), a -> { + ActorRef replyTo = (ActorRef) a.getRequest().get(); + replyTo.tell("ack", self()); + }). 
+ + match(UpdateTimeout.class, a -> a.key().equals(counter1Key), a -> { + ActorRef replyTo = (ActorRef) a.getRequest().get(); + replyTo.tell("nack", self()); + }).build()); + + //#update-request-context + } + + @SuppressWarnings({ "unused", "unchecked" }) + @Test + public void demonstrateGet() { + probe = new JavaTestKit(system); + + //#get + final ActorRef replicator = DistributedData.get(system).replicator(); + final Key counter1Key = PNCounterKey.create("counter1"); + final Key> set1Key = GSetKey.create("set1"); + final Key> set2Key = ORSetKey.create("set2"); + final Key activeFlagKey = FlagKey.create("active"); + + replicator.tell(new Replicator.Get(counter1Key, + Replicator.readLocal()), self()); + + final ReadConsistency readFrom3 = new ReadFrom(3, Duration.create(1, SECONDS)); + replicator.tell(new Replicator.Get>(set1Key, + readFrom3), self()); + + final ReadConsistency readMajority = new ReadMajority(Duration.create(5, SECONDS)); + replicator.tell(new Replicator.Get>(set2Key, + readMajority), self()); + + final ReadConsistency readAll = new ReadAll(Duration.create(5, SECONDS)); + replicator.tell(new Replicator.Get(activeFlagKey, + readAll), self()); + //#get + + //#get-response1 + receive(ReceiveBuilder. + match(GetSuccess.class, a -> a.key().equals(counter1Key), a -> { + GetSuccess g = a; + BigInteger value = g.dataValue().getValue(); + }). + match(NotFound.class, a -> a.key().equals(counter1Key), a -> { + // key counter1 does not exist + }).build()); + //#get-response1 + + //#get-response2 + receive(ReceiveBuilder. + match(GetSuccess.class, a -> a.key().equals(set1Key), a -> { + GetSuccess> g = a; + Set value = g.dataValue().getElements(); + }). + match(GetFailure.class, a -> a.key().equals(set1Key), a -> { + // read from 3 nodes failed within 1.second + }). 
+ match(NotFound.class, a -> a.key().equals(set1Key), a -> { + // key set1 does not exist + }).build()); + //#get-response2 + + } + + @SuppressWarnings("unchecked") + @Test + public void demonstrateGetWithRequestContext() { + probe = new JavaTestKit(system); + + //#get-request-context + final ActorRef replicator = DistributedData.get(system).replicator(); + final ReadConsistency readTwo = new ReadFrom(2, Duration.create(3, SECONDS)); + final Key counter1Key = PNCounterKey.create("counter1"); + + receive(ReceiveBuilder. + match(String.class, a -> a.equals("get-count"), a -> { + // incoming request to retrieve current value of the counter + Optional reqContext = Optional.of(sender()); + replicator.tell(new Replicator.Get(counter1Key, + readTwo), self()); + }). + + match(GetSuccess.class, a -> a.key().equals(counter1Key), a -> { + ActorRef replyTo = (ActorRef) a.getRequest().get(); + GetSuccess g = a; + long value = g.dataValue().getValue().longValue(); + replyTo.tell(value, self()); + }). + + match(GetFailure.class, a -> a.key().equals(counter1Key), a -> { + ActorRef replyTo = (ActorRef) a.getRequest().get(); + replyTo.tell(-1L, self()); + }). + + match(NotFound.class, a -> a.key().equals(counter1Key), a -> { + ActorRef replyTo = (ActorRef) a.getRequest().get(); + replyTo.tell(0L, self()); + }).build()); + //#get-request-context + } + + @SuppressWarnings("unchecked") + abstract class MyActor { + //#subscribe + final ActorRef replicator = DistributedData.get(system).replicator(); + final Key counter1Key = PNCounterKey.create("counter1"); + + BigInteger currentValue = BigInteger.valueOf(0); + + public MyActor() { + receive(ReceiveBuilder. + match(Changed.class, a -> a.key().equals(counter1Key), a -> { + Changed g = a; + currentValue = g.dataValue().getValue(); + }). 
+ + match(String.class, a -> a.equals("get-count"), a -> { + // incoming request to retrieve current value of the counter + sender().tell(currentValue, sender()); + }).build()); + } + + public void preStart() { + // subscribe to changes of the Counter1Key value + replicator.tell(new Subscribe(counter1Key, self()), ActorRef.noSender()); + } + + //#subscribe + } + + @Test + public void demonstrateDelete() { + probe = new JavaTestKit(system); + + //#delete + final ActorRef replicator = DistributedData.get(system).replicator(); + final Key counter1Key = PNCounterKey.create("counter1"); + final Key> set2Key = ORSetKey.create("set2"); + + replicator.tell(new Delete(counter1Key, + Replicator.writeLocal()), self()); + + final WriteConsistency writeMajority = + new WriteMajority(Duration.create(5, SECONDS)); + replicator.tell(new Delete(counter1Key, + writeMajority), self()); + //#delete + } + + public void demonstratePNCounter() { + //#pncounter + final Cluster node = Cluster.get(system); + final PNCounter c0 = PNCounter.create(); + final PNCounter c1 = c0.increment(node, 1); + final PNCounter c2 = c1.increment(node, 7); + final PNCounter c3 = c2.decrement(node, 2); + System.out.println(c3.value()); // 6 + //#pncounter + } + + public void demonstratePNCounterMap() { + //#pncountermap + final Cluster node = Cluster.get(system); + final PNCounterMap m0 = PNCounterMap.create(); + final PNCounterMap m1 = m0.increment(node, "a", 7); + final PNCounterMap m2 = m1.decrement(node, "a", 2); + final PNCounterMap m3 = m2.increment(node, "b", 1); + System.out.println(m3.get("a")); // 5 + System.out.println(m3.getEntries()); + //#pncountermap + } + + public void demonstrateGSet() { + //#gset + final GSet s0 = GSet.create(); + final GSet s1 = s0.add("a"); + final GSet s2 = s1.add("b").add("c"); + if (s2.contains("a")) + System.out.println(s2.getElements()); // a, b, c + //#gset + } + + public void demonstrateORSet() { + //#orset + final Cluster node = Cluster.get(system); + final ORSet 
s0 = ORSet.create(); + final ORSet s1 = s0.add(node, "a"); + final ORSet s2 = s1.add(node, "b"); + final ORSet s3 = s2.remove(node, "a"); + System.out.println(s3.getElements()); // b + //#orset + } + + public void demonstrateFlag() { + //#flag + final Flag f0 = Flag.create(); + final Flag f1 = f0.switchOn(); + System.out.println(f1.enabled()); + //#flag + } + + @Test + public void demonstrateLWWRegister() { + //#lwwregister + final Cluster node = Cluster.get(system); + final LWWRegister r1 = LWWRegister.create(node, "Hello"); + final LWWRegister r2 = r1.withValue(node, "Hi"); + System.out.println(r1.value() + " by " + r1.updatedBy() + " at " + r1.timestamp()); + //#lwwregister + assertEquals("Hi", r2.value()); + } + + static + //#lwwregister-custom-clock + class Record { + public final int version; + public final String name; + public final String address; + + public Record(int version, String name, String address) { + this.version = version; + this.name = name; + this.address = address; + } + } + + //#lwwregister-custom-clock + + public void demonstrateLWWRegisterWithCustomClock() { + //#lwwregister-custom-clock + + final Cluster node = Cluster.get(system); + final LWWRegister.Clock recordClock = new LWWRegister.Clock() { + @Override + public long apply(long currentTimestamp, Record value) { + return value.version; + } + }; + + final Record record1 = new Record(1, "Alice", "Union Square"); + final LWWRegister r1 = LWWRegister.create(node, record1); + + final Record record2 = new Record(2, "Alice", "Madison Square"); + final LWWRegister r2 = LWWRegister.create(node, record2); + + final LWWRegister r3 = r1.merge(r2); + System.out.println(r3.value()); + //#lwwregister-custom-clock + + assertEquals("Madison Square", r3.value().address); + } + +} diff --git a/akka-docs/rst/java/distributed-data.rst b/akka-docs/rst/java/distributed-data.rst new file mode 100644 index 0000000000..d902a9c7c2 --- /dev/null +++ b/akka-docs/rst/java/distributed-data.rst @@ -0,0 +1,516 @@ + 
+.. _distributed_data_java: + +################## + Distributed Data +################## + +*Akka Distributed Data* is useful when you need to share data between nodes in an +Akka Cluster. The data is accessed with an actor providing a key-value store like API. +The keys are unique identifiers with type information of the data values. The values +are *Conflict Free Replicated Data Types* (CRDTs). + +All data entries are spread to all nodes, or nodes with a certain role, in the cluster +via direct replication and gossip based dissemination. You have fine grained control +of the consistency level for reads and writes. + +The nature of CRDTs makes it possible to perform updates from any node without coordination. +Concurrent updates from different nodes will automatically be resolved by the monotonic +merge function, which all data types must provide. The state changes always converge. +Several useful data types for counters, sets, maps and registers are provided and +you can also implement your own custom data types. + +It is eventually consistent and geared toward providing high read and write availability +(partition tolerance), with low latency. Note that in an eventually consistent system a read may return an +out-of-date value. + +Using the Replicator +==================== + +The ``akka.cluster.ddata.Replicator`` actor provides the API for interacting with the data. +The ``Replicator`` actor must be started on each node in the cluster, or group of nodes tagged +with a specific role. It communicates with other ``Replicator`` instances with the same path +(without address) that are running on other nodes. For convenience it can be used with the +``akka.cluster.ddata.DistributedData`` extension. + +Below is an example of an actor that schedules tick messages to itself and for each tick +adds or removes elements from an ``ORSet`` (observed-remove set). It also subscribes to +changes of this. + +.. includecode:: code/docs/ddata/DataBot.java#data-bot + +.. 
_replicator_update_java:
+
+Update
+------
+
+To modify and replicate a data value you send a ``Replicator.Update`` message to the local
+``Replicator``.
+
+The current data value for the ``key`` of the ``Update`` is passed as parameter to the ``modify``
+function of the ``Update``. The function is supposed to return the new value of the data, which
+will then be replicated according to the given consistency level.
+
+The ``modify`` function is called by the ``Replicator`` actor and must therefore be a pure
+function that only uses the data parameter and stable fields from enclosing scope. It must
+for example not access the ``sender()`` reference of an enclosing actor.
+
+``Update`` is intended to only be sent from an actor running in the same local ``ActorSystem`` as
+the ``Replicator``, because the ``modify`` function is typically not serializable.
+
+You supply a write consistency level which has the following meaning:
+
+* ``writeLocal`` the value will immediately only be written to the local replica,
+  and later disseminated with gossip
+* ``writeTo(n)`` the value will immediately be written to at least ``n`` replicas,
+  including the local replica
+* ``writeMajority`` the value will immediately be written to a majority of replicas, i.e.
+  at least **N/2 + 1** replicas, where N is the number of nodes in the cluster
+  (or cluster role group)
+* ``writeAll`` the value will immediately be written to all nodes in the cluster
+  (or all nodes in the cluster role group)
+
+.. includecode:: code/docs/ddata/DistributedDataDocTest.java#update
+
+As reply of the ``Update`` a ``Replicator.UpdateSuccess`` is sent to the sender of the
+``Update`` if the value was successfully replicated according to the supplied consistency
+level within the supplied timeout. Otherwise a ``Replicator.UpdateFailure`` subclass is
+sent back. Note that a ``Replicator.UpdateTimeout`` reply does not mean that the update completely failed
+or was rolled back.
It may still have been replicated to some nodes, and will eventually +be replicated to all nodes with the gossip protocol. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#update-response1 + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#update-response2 + +You will always see your own writes. For example if you send two ``Update`` messages +changing the value of the same ``key``, the ``modify`` function of the second message will +see the change that was performed by the first ``Update`` message. + +In the ``Update`` message you can pass an optional request context, which the ``Replicator`` +does not care about, but is included in the reply messages. This is a convenient +way to pass contextual information (e.g. original sender) without having to use ``ask`` +or maintain local correlation data structures. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#update-request-context + +.. _replicator_get_java: + +Get +--- + +To retrieve the current value of a data you send ``Replicator.Get`` message to the +``Replicator``. You supply a consistency level which has the following meaning: + +* ``readLocal`` the value will only be read from the local replica +* ``readFrom(n)`` the value will be read and merged from ``n`` replicas, + including the local replica +* ``readMajority`` the value will be read and merged from a majority of replicas, i.e. + at least **N/2 + 1** replicas, where N is the number of nodes in the cluster + (or cluster role group) +* ``readAll`` the value will be read and merged from all nodes in the cluster + (or all nodes in the cluster role group) + + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#get + +As reply of the ``Get`` a ``Replicator.GetSuccess`` is sent to the sender of the +``Get`` if the value was successfully retrieved according to the supplied consistency +level within the supplied timeout. Otherwise a ``Replicator.GetFailure`` is sent. 
+If the key does not exist the reply will be ``Replicator.NotFound``. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#get-response1 + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#get-response2 + +You will always read your own writes. For example if you send a ``Update`` message +followed by a ``Get`` of the same ``key`` the ``Get`` will retrieve the change that was +performed by the preceding ``Update`` message. However, the order of the reply messages are +not defined, i.e. in the previous example you may receive the ``GetSuccess`` before +the ``UpdateSuccess``. + +In the ``Get`` message you can pass an optional request context in the same way as for the +``Update`` message, described above. For example the original sender can be passed and replied +to after receiving and transforming ``GetSuccess``. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#get-request-context + +Consistency +----------- + +The consistency level that is supplied in the :ref:`replicator_update_java` and :ref:`replicator_get_java` +specifies per request how many replicas that must respond successfully to a write and read request. + +For low latency reads you use ``ReadLocal`` with the risk of retrieving stale data, i.e. updates +from other nodes might not be visible yet. + +When using ``writeLocal`` the update is only written to the local replica and then disseminated +in the background with the gossip protocol, which can take few seconds to spread to all nodes. + +``writeAll`` and ``readAll`` is the strongest consistency level, but also the slowest and with +lowest availability. For example, it is enough that one node is unavailable for a ``Get`` request +and you will not receive the value. 
+
+If consistency is important, you can ensure that a read always reflects the most recent
+write by using the following formula::
+
+  (nodes_written + nodes_read) > N
+
+where N is the total number of nodes in the cluster, or the number of nodes with the role that is
+used for the ``Replicator``.
+
+For example, in a 7 node cluster these consistency properties are achieved by writing to 4 nodes
+and reading from 4 nodes, or writing to 5 nodes and reading from 3 nodes.
+
+By combining ``writeMajority`` and ``readMajority`` levels a read always reflects the most recent write.
+The ``Replicator`` writes and reads to a majority of replicas, i.e. **N / 2 + 1**. For example,
+in a 5 node cluster it writes to 3 nodes and reads from 3 nodes. In a 6 node cluster it writes
+to 4 nodes and reads from 4 nodes.
+
+Here is an example of using ``writeMajority`` and ``readMajority``:
+
+**FIXME convert this example to Java**
+
+.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#read-write-majority
+
+.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#get-cart
+
+.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#add-item
+
+In some rare cases, when performing an ``Update`` it is needed to first try to fetch latest data from
+other nodes. That can be done by first sending a ``Get`` with ``ReadMajority`` and then continue with
+the ``Update`` when the ``GetSuccess``, ``GetFailure`` or ``NotFound`` reply is received. This might be
+needed when you need to base a decision on latest information or when removing entries from ``ORSet``
+or ``ORMap``.
If an entry is added to an ``ORSet`` or ``ORMap`` from one node and removed from another
+node the entry will only be removed if the added entry is visible on the node where the removal is
+performed (hence the name observed-removed set).
+
+The following example illustrates how to do that:
+
+**FIXME convert this example to Java**
+
+.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#remove-item
+
+.. warning::
+
+  *Caveat:* Even if you use ``writeMajority`` and ``readMajority`` there is a small risk that you may
+  read stale data if the cluster membership has changed between the ``Update`` and the ``Get``.
+  For example, in a cluster of 5 nodes when you ``Update`` and that change is written to 3 nodes:
+  n1, n2, n3. Then 2 more nodes are added and a ``Get`` request is reading from 4 nodes, which
+  happens to be n4, n5, n6, n7, i.e. the value on n1, n2, n3 is not seen in the response of the
+  ``Get`` request.
+
+Subscribe
+---------
+
+You may also register interest in change notifications by sending a ``Replicator.Subscribe``
+message to the ``Replicator``. It will send ``Replicator.Changed`` messages to the registered
+subscriber when the data for the subscribed key is updated. Subscribers will be notified
+periodically with the configured ``notify-subscribers-interval``, and it is also possible to
+send an explicit ``Replicator.FlushChanges`` message to the ``Replicator`` to notify the subscribers
+immediately.
+
+The subscriber is automatically removed if the subscriber is terminated. A subscriber can
+also be deregistered with the ``Replicator.Unsubscribe`` message.
+
+.. includecode:: code/docs/ddata/DistributedDataDocTest.java#subscribe
+
+Delete
+------
+
+A data entry can be deleted by sending a ``Replicator.Delete`` message to the local
+``Replicator``.
As reply of the ``Delete`` a ``Replicator.DeleteSuccess`` is sent to +the sender of the ``Delete`` if the value was successfully deleted according to the supplied +consistency level within the supplied timeout. Otherwise a ``Replicator.ReplicationDeleteFailure`` +is sent. Note that ``ReplicationDeleteFailure`` does not mean that the delete completely failed or +was rolled back. It may still have been replicated to some nodes, and may eventually be replicated +to all nodes. + +A deleted key cannot be reused again, but it is still recommended to delete unused +data entries because that reduces the replication overhead when new nodes join the cluster. +Subsequent ``Delete``, ``Update`` and ``Get`` requests will be replied with ``Replicator.DataDeleted``. +Subscribers will receive ``Replicator.DataDeleted``. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#delete + +Data Types +========== + +The data types must be convergent (stateful) CRDTs and implement the ``ReplicatedData`` trait, +i.e. they provide a monotonic merge function and the state changes always converge. + +You can use your own custom ``ReplicatedData`` types, and several types are provided +by this package, such as: + +* Counters: ``GCounter``, ``PNCounter`` +* Sets: ``GSet``, ``ORSet`` +* Maps: ``ORMap``, ``LWWMap``, ``PNCounterMap`` +* Registers: ``LWWRegister``, ``Flag`` + +Counters +-------- + +``GCounter`` is a "grow only counter". It only supports increments, no decrements. + +It works in a similar way as a vector clock. It keeps track of one counter per node and the total +value is the sum of these counters. The ``merge`` is implemented by taking the maximum count for +each node. + +If you need both increments and decrements you can use the ``PNCounter`` (positive/negative counter). + +It is tracking the increments (P) separate from the decrements (N). Both P and N are represented +as two internal ``GCounter``. Merge is handled by merging the internal P and N counters. 
+The value of the counter is the value of the P counter minus the value of the N counter. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#pncounter + +Several related counters can be managed in a map with the ``PNCounterMap`` data type. +When the counters are placed in a ``PNCounterMap`` as opposed to placing them as separate top level +values they are guaranteed to be replicated together as one unit, which is sometimes necessary for +related data. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#pncountermap + +Sets +---- + +If you only need to add elements to a set and not remove elements the ``GSet`` (grow-only set) is +the data type to use. The elements can be any type of values that can be serialized. +Merge is simply the union of the two sets. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#gset + +If you need add and remove operations you should use the ``ORSet`` (observed-remove set). +Elements can be added and removed any number of times. If an element is concurrently added and +removed, the add will win. You cannot remove an element that you have not seen. + +The ``ORSet`` has a version vector that is incremented when an element is added to the set. +The version for the node that added the element is also tracked for each element in a so +called "birth dot". The version vector and the dots are used by the ``merge`` function to +track causality of the operations and resolve concurrent updates. + +.. includecode:: code/docs/ddata/DistributedDataDocTest.java#orset + +Maps +---- + +``ORMap`` (observed-remove map) is a map with ``String`` keys and the values are ``ReplicatedData`` +types themselves. It supports add, remove and delete any number of times for a map entry. + +If an entry is concurrently added and removed, the add will win. You cannot remove an entry that +you have not seen. This is the same semantics as for the ``ORSet``. 
+ +If an entry is concurrently updated to different values the values will be merged, hence the +requirement that the values must be ``ReplicatedData`` types. + +It is rather inconvenient to use the ``ORMap`` directly since it does not expose specific types +of the values. The ``ORMap`` is intended as a low level tool for building more specific maps, +such as the following specialized maps. + +``PNCounterMap`` (positive negative counter map) is a map of named counters. It is a specialized +``ORMap`` with ``PNCounter`` values. + +``LWWMap`` (last writer wins map) is a specialized ``ORMap`` with ``LWWRegister`` (last writer wins register) +values. + +Note that ``LWWRegister`` and therefore ``LWWMap`` relies on synchronized clocks and should only be used +when the choice of value is not important for concurrent updates occurring within the clock skew. + +Instead of using timestamps based on ``System.currentTimeMillis()`` time it is possible to +use a timestamp value based on something else, for example an increasing version number +from a database record that is used for optimistic concurrency control. + +When a data entry is changed the full state of that entry is replicated to other nodes, i.e. +when you update an map the whole map is replicated. Therefore, instead of using one ``ORMap`` +with 1000 elements it is more efficient to split that up in 10 top level ``ORMap`` entries +with 100 elements each. Top level entries are replicated individually, which has the +trade-off that different entries may not be replicated at the same time and you may see +inconsistencies between related entries. Separate top level entries cannot be updated atomically +together. + +Flags and Registers +------------------- + +``Flag`` is a data type for a boolean value that is initialized to ``false`` and can be switched +to ``true``. Thereafter it cannot be changed. ``true`` wins over ``false`` in merge. + +.. 
includecode:: code/docs/ddata/DistributedDataDocTest.java#flag
+
+``LWWRegister`` (last writer wins register) can hold any (serializable) value.
+
+Merge of a ``LWWRegister`` takes the register with the highest timestamp. Note that this
+relies on synchronized clocks. ``LWWRegister`` should only be used when the choice of
+value is not important for concurrent updates occurring within the clock skew.
+
+Merge takes the register updated by the node with lowest address (``UniqueAddress`` is ordered)
+if the timestamps are exactly the same.
+
+.. includecode:: code/docs/ddata/DistributedDataDocTest.java#lwwregister
+
+Instead of using timestamps based on ``System.currentTimeMillis()`` time it is possible to
+use a timestamp value based on something else, for example an increasing version number
+from a database record that is used for optimistic concurrency control.
+
+.. includecode:: code/docs/ddata/DistributedDataDocTest.java#lwwregister-custom-clock
+
+For first-write-wins semantics you can use the ``LWWRegister#reverseClock`` instead of the
+``LWWRegister#defaultClock``.
+
+Custom Data Type
+----------------
+
+You can rather easily implement your own data types. The only requirement is that it implements
+the ``merge`` function of the ``AbstractReplicatedData`` class.
+
+A nice property of stateful CRDTs is that they typically compose nicely, i.e. you can combine several
+smaller data types to build richer data structures. For example, the ``PNCounter`` is composed of
+two internal ``GCounter`` instances to keep track of increments and decrements separately.
+
+Here is a simple implementation of a custom ``TwoPhaseSet`` that is using two internal ``GSet`` types
+to keep track of additions and removals. A ``TwoPhaseSet`` is a set where an element may be added and
+removed, but never added again thereafter.
+
+**FIXME convert this example to Java**
+
+.. includecode:: ../scala/code/docs/ddata/TwoPhaseSet.scala#twophaseset
+
+Data types should be immutable, i.e.
"modifying" methods should return a new instance. + +Serialization +^^^^^^^^^^^^^ + +The data types must be serializable with an :ref:`Akka Serializer `. +It is highly recommended that you implement efficient serialization with Protobuf or similar +for your custom data types. The built in data types are marked with ``ReplicatedDataSerialization`` +and serialized with ``akka.cluster.ddata.protobuf.ReplicatedDataSerializer``. + +Serialization of the data types are used in remote messages and also for creating message +digests (SHA-1) to detect changes. Therefore it is important that the serialization is efficient +and produce the same bytes for the same content. For example sets and maps should be sorted +deterministically in the serialization. + +This is a protobuf representation of the above ``TwoPhaseSet``: + +.. includecode:: ../../src/main/protobuf/TwoPhaseSetMessages.proto#twophaseset + +The serializer for the ``TwoPhaseSet``: + +**FIXME convert this example to Java** + +.. includecode:: ../scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala#serializer + +Note that the elements of the sets are sorted so the SHA-1 digests are the same +for the same elements. + +You register the serializer in configuration: + +.. includecode:: ../scala/code/docs/ddata/DistributedDataDocSpec.scala#serializer-config + +Using compression can sometimes be a good idea to reduce the data size. Gzip compression is +provided by the ``akka.cluster.ddata.protobuf.SerializationSupport`` trait: + +**FIXME convert this example to Java** + +.. includecode:: ../scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala#compression + +The two embedded ``GSet`` can be serialized as illustrated above, but in general when composing +new data types from the existing built in types it is better to make use of the existing +serializer for those types. This can be done by declaring those as bytes fields in protobuf: + +.. 
includecode:: ../../src/main/protobuf/TwoPhaseSetMessages.proto#twophaseset2
+
+and use the methods ``otherMessageToProto`` and ``otherMessageFromBinary`` that are provided
+by the ``SerializationSupport`` trait to serialize and deserialize the ``GSet`` instances. This
+works with any type that has a registered Akka serializer. This is how such a serializer would
+look for the ``TwoPhaseSet``:
+
+**FIXME convert this example to Java**
+
+.. includecode:: ../scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala#serializer
+
+
+CRDT Garbage
+------------
+
+One thing that can be problematic with CRDTs is that some data types accumulate history (garbage).
+For example a ``GCounter`` keeps track of one counter per node. If a ``GCounter`` has been updated
+from one node it will associate the identifier of that node forever. That can become a problem
+for long running systems with many cluster nodes being added and removed. To solve this problem
+the ``Replicator`` performs pruning of data associated with nodes that have been removed from the
+cluster. Data types that need pruning have to implement the ``RemovedNodePruning`` trait.
+
+Samples
+=======
+
+**FIXME convert these samples to Java and activator template**
+
+* `Replicated Cache <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala>`_
+* `Replicated Metrics <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala>`_
+* `Replicated Service Registry <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedServiceRegistrySpec.scala>`_
+* `VotingService <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/VotingContestSpec.scala>`_
+* `ShoppingCart <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala>`_
+
+Limitations
+===========
+
+There are some limitations that you should be aware of.
+ +CRDTs cannot be used for all types of problems, and eventual consistency does not fit +all domains. Sometimes you need strong consistency. + +It is not intended for *Big Data*. The number of top level entries should not exceed 100000. +When a new node is added to the cluster all these entries are transferred (gossiped) to the +new node. The entries are split up in chunks and all existing nodes collaborate in the gossip, +but it will take a while (tens of seconds) to transfer all entries and this means that you +cannot have too many top level entries. The current recommended limit is 100000. We will +be able to improve this if needed, but the design is still not intended for billions of entries. + +All data is held in memory, which is another reason why it is not intended for *Big Data*. + +When a data entry is changed the full state of that entry is replicated to other nodes. For example, +if you add one element to a Set with 100 existing elements, all 101 elements are transferred to +other nodes. This means that you cannot have too large data entries, because then the remote message +size will be too large. We might be able to make this more efficient by implementing +`Efficient State-based CRDTs by Delta-Mutation `_. + +The data is only kept in memory. It is redundant since it is replicated to other nodes +in the cluster, but if you stop all nodes the data is lost, unless you have saved it +elsewhere. Making the data durable is a possible future feature, but even if we implement that +it is not intended to be a full featured database. + +Learn More about CRDTs +====================== + +* `The Final Causal Frontier `_ + talk by Sean Cribbs +* `Eventually Consistent Data Structures `_ + talk by Sean Cribbs +* `Strong Eventual Consistency and Conflict-free Replicated Data Types `_ + talk by Mark Shapiro +* `A comprehensive study of Convergent and Commutative Replicated Data Types `_ + paper by Mark Shapiro et. al. 
+ +Dependencies +------------ + +To use Distributed Data you must add the following dependency in your project. + +sbt:: + + "com.typesafe.akka" %% "akka-distributed-data" % "@version@" @crossString@ + +maven:: + + + com.typesafe.akka + akka-distributed-data_@binVersion@ + @version@ + + +Configuration +============= + +The ``DistributedData`` extension can be configured with the following properties: + +.. includecode:: ../../../akka-distributed-data/src/main/resources/reference.conf#distributed-data + \ No newline at end of file diff --git a/akka-docs/rst/java/index-network.rst b/akka-docs/rst/java/index-network.rst index 5bc5e8dbd0..01b80308d7 100644 --- a/akka-docs/rst/java/index-network.rst +++ b/akka-docs/rst/java/index-network.rst @@ -11,6 +11,7 @@ Networking ../scala/cluster-client ../scala/cluster-sharding cluster-metrics + distributed-data remoting serialization io diff --git a/akka-docs/rst/scala/code/docs/ddata/DistributedDataDocSpec.scala b/akka-docs/rst/scala/code/docs/ddata/DistributedDataDocSpec.scala new file mode 100644 index 0000000000..045ee0aeb1 --- /dev/null +++ b/akka-docs/rst/scala/code/docs/ddata/DistributedDataDocSpec.scala @@ -0,0 +1,381 @@ +/** + * Copyright (C) 2015 Typesafe Inc. 
+ */ +package docs.ddata + +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.cluster.Cluster +import akka.cluster.ddata._ +import akka.cluster.ddata.Replicator._ +import akka.testkit.AkkaSpec +import akka.testkit.ImplicitSender +import akka.testkit.TestProbe +import akka.actor.ActorRef +import akka.serialization.SerializationExtension + +object DistributedDataDocSpec { + + val config = + """ + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.remote.netty.tcp.port = 0 + + #//#serializer-config + akka.actor { + serializers { + two-phase-set = "docs.ddata.protobuf.TwoPhaseSetSerializer" + } + serialization-bindings { + "docs.ddata.TwoPhaseSet" = two-phase-set + } + } + #//#serializer-config + """ + + //#data-bot + import scala.concurrent.forkjoin.ThreadLocalRandom + import akka.actor.Actor + import akka.actor.ActorLogging + import akka.cluster.Cluster + import akka.cluster.ddata.DistributedData + import akka.cluster.ddata.ORSet + import akka.cluster.ddata.ORSetKey + import akka.cluster.ddata.Replicator + import akka.cluster.ddata.Replicator._ + + object DataBot { + private case object Tick + } + + class DataBot extends Actor with ActorLogging { + import DataBot._ + + val replicator = DistributedData(context.system).replicator + implicit val node = Cluster(context.system) + + import context.dispatcher + val tickTask = context.system.scheduler.schedule(5.seconds, 5.seconds, self, Tick) + + val DataKey = ORSetKey[String]("key") + + replicator ! Subscribe(DataKey, self) + + def receive = { + case Tick => + val s = ThreadLocalRandom.current().nextInt(97, 123).toChar.toString + if (ThreadLocalRandom.current().nextBoolean()) { + // add + log.info("Adding: {}", s) + replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ + s) + } else { + // remove + log.info("Removing: {}", s) + replicator ! 
Update(DataKey, ORSet.empty[String], WriteLocal)(_ - s) + } + + case _: UpdateResponse[_] => // ignore + + case c @ Changed(DataKey) => + val data = c.get(DataKey) + log.info("Current elements: {}", data.elements) + } + + override def postStop(): Unit = tickTask.cancel() + + } + //#data-bot + +} + +class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { + import Replicator._ + + import DistributedDataDocSpec._ + + "demonstrate update" in { + val probe = TestProbe() + implicit val self = probe.ref + + //#update + implicit val node = Cluster(system) + val replicator = DistributedData(system).replicator + + val Counter1Key = PNCounterKey("counter1") + val Set1Key = GSetKey[String]("set1") + val Set2Key = ORSetKey[String]("set2") + val ActiveFlagKey = FlagKey("active") + + replicator ! Update(Counter1Key, PNCounter(), WriteLocal)(_ + 1) + + val writeTo3 = WriteTo(n = 3, timeout = 1.second) + replicator ! Update(Set1Key, GSet.empty[String], writeTo3)(_ + "hello") + + val writeMajority = WriteMajority(timeout = 5.seconds) + replicator ! Update(Set2Key, ORSet.empty[String], writeMajority)(_ + "hello") + + val writeAll = WriteAll(timeout = 5.seconds) + replicator ! 
Update(ActiveFlagKey, Flag.empty, writeAll)(_.switchOn) + //#update + + probe.expectMsgType[UpdateResponse[_]] match { + //#update-response1 + case UpdateSuccess(Counter1Key, req) => // ok + //#update-response1 + case unexpected => fail("Unexpected response: " + unexpected) + } + + probe.expectMsgType[UpdateResponse[_]] match { + //#update-response2 + case UpdateSuccess(Set1Key, req) => // ok + case UpdateTimeout(Set1Key, req) => + // write to 3 nodes failed within 1.second + //#update-response2 + case UpdateSuccess(Set2Key, None) => + case unexpected => fail("Unexpected response: " + unexpected) + } + } + + "demonstrate update with request context" in { + import Actor.Receive + val probe = TestProbe() + implicit val self = probe.ref + def sender() = self + + //#update-request-context + implicit val node = Cluster(system) + val replicator = DistributedData(system).replicator + val writeTwo = WriteTo(n = 2, timeout = 3.second) + val Counter1Key = PNCounterKey("counter1") + + def receive: Receive = { + case "increment" => + // incoming command to increase the counter + val upd = Update(Counter1Key, PNCounter(), writeTwo, request = Some(sender()))(_ + 1) + replicator ! upd + + case UpdateSuccess(Counter1Key, Some(replyTo: ActorRef)) => + replyTo ! "ack" + case UpdateTimeout(Counter1Key, Some(replyTo: ActorRef)) => + replyTo ! "nack" + } + //#update-request-context + } + + "demonstrate get" in { + val probe = TestProbe() + implicit val self = probe.ref + + //#get + val replicator = DistributedData(system).replicator + val Counter1Key = PNCounterKey("counter1") + val Set1Key = GSetKey[String]("set1") + val Set2Key = ORSetKey[String]("set2") + val ActiveFlagKey = FlagKey("active") + + replicator ! Get(Counter1Key, ReadLocal) + + val readFrom3 = ReadFrom(n = 3, timeout = 1.second) + replicator ! Get(Set1Key, readFrom3) + + val readMajority = ReadMajority(timeout = 5.seconds) + replicator ! 
Get(Set2Key, readMajority) + + val readAll = ReadAll(timeout = 5.seconds) + replicator ! Get(ActiveFlagKey, readAll) + //#get + + probe.expectMsgType[GetResponse[_]] match { + //#get-response1 + case g @ GetSuccess(Counter1Key, req) => + val value = g.get(Counter1Key).value + case NotFound(Counter1Key, req) => // key counter1 does not exist + //#get-response1 + case unexpected => fail("Unexpected response: " + unexpected) + } + + probe.expectMsgType[GetResponse[_]] match { + //#get-response2 + case g @ GetSuccess(Set1Key, req) => + val elements = g.get(Set1Key).elements + case GetFailure(Set1Key, req) => + // read from 3 nodes failed within 1.second + case NotFound(Set1Key, req) => // key set1 does not exist + //#get-response2 + case g @ GetSuccess(Set2Key, None) => + val elements = g.get(Set2Key).elements + case unexpected => fail("Unexpected response: " + unexpected) + } + } + + "demonstrate get with request context" in { + import Actor.Receive + val probe = TestProbe() + implicit val self = probe.ref + def sender() = self + + //#get-request-context + implicit val node = Cluster(system) + val replicator = DistributedData(system).replicator + val readTwo = ReadFrom(n = 2, timeout = 3.second) + val Counter1Key = PNCounterKey("counter1") + + def receive: Receive = { + case "get-count" => + // incoming request to retrieve current value of the counter + replicator ! Get(Counter1Key, readTwo, request = Some(sender())) + + case g @ GetSuccess(Counter1Key, Some(replyTo: ActorRef)) => + val value = g.get(Counter1Key).value.longValue + replyTo ! value + case GetFailure(Counter1Key, Some(replyTo: ActorRef)) => + replyTo ! -1L + case NotFound(Counter1Key, Some(replyTo: ActorRef)) => + replyTo ! 
0L + } + //#get-request-context + } + + "demonstrate subscribe" in { + import Actor.Receive + val probe = TestProbe() + implicit val self = probe.ref + def sender() = self + + //#subscribe + val replicator = DistributedData(system).replicator + val Counter1Key = PNCounterKey("counter1") + // subscribe to changes of the Counter1Key value + replicator ! Subscribe(Counter1Key, self) + var currentValue = BigInt(0) + + def receive: Receive = { + case c @ Changed(Counter1Key) => + currentValue = c.get(Counter1Key).value + case "get-count" => + // incoming request to retrieve current value of the counter + sender() ! currentValue + } + //#subscribe + } + + "demonstrate delete" in { + val probe = TestProbe() + implicit val self = probe.ref + + //#delete + val replicator = DistributedData(system).replicator + val Counter1Key = PNCounterKey("counter1") + val Set2Key = ORSetKey[String]("set2") + + replicator ! Delete(Counter1Key, WriteLocal) + + val writeMajority = WriteMajority(timeout = 5.seconds) + replicator ! 
Delete(Set2Key, writeMajority) + //#delete + } + + "demonstrate PNCounter" in { + def println(o: Any): Unit = () + //#pncounter + implicit val node = Cluster(system) + val c0 = PNCounter.empty + val c1 = c0 + 1 + val c2 = c1 + 7 + val c3: PNCounter = c2 - 2 + println(c3.value) // 6 + //#pncounter + } + + "demonstrate PNCounterMap" in { + def println(o: Any): Unit = () + //#pncountermap + implicit val node = Cluster(system) + val m0 = PNCounterMap.empty + val m1 = m0.increment("a", 7) + val m2 = m1.decrement("a", 2) + val m3 = m2.increment("b", 1) + println(m3.get("a")) // 5 + m3.entries.foreach { case (key, value) => println(s"$key -> $value") } + //#pncountermap + } + + "demonstrate GSet" in { + def println(o: Any): Unit = () + //#gset + val s0 = GSet.empty[String] + val s1 = s0 + "a" + val s2 = s1 + "b" + "c" + if (s2.contains("a")) + println(s2.elements) // a, b, c + //#gset + } + + "demonstrate ORSet" in { + def println(o: Any): Unit = () + //#orset + implicit val node = Cluster(system) + val s0 = ORSet.empty[String] + val s1 = s0 + "a" + val s2 = s1 + "b" + val s3 = s2 - "a" + println(s3.elements) // b + //#orset + } + + "demonstrate Flag" in { + def println(o: Any): Unit = () + //#flag + val f0 = Flag.empty + val f1 = f0.switchOn + println(f1.enabled) + //#flag + } + + "demonstrate LWWRegister" in { + def println(o: Any): Unit = () + //#lwwregister + implicit val node = Cluster(system) + val r1 = LWWRegister("Hello") + val r2 = r1.withValue("Hi") + println(s"${r1.value} by ${r1.updatedBy} at ${r1.timestamp}") + //#lwwregister + r2.value should be("Hi") + } + + "demonstrate LWWRegister with custom clock" in { + def println(o: Any): Unit = () + //#lwwregister-custom-clock + case class Record(version: Int, name: String, address: String) + + implicit val node = Cluster(system) + implicit val recordClock = new LWWRegister.Clock[Record] { + override def apply(currentTimestamp: Long, value: Record): Long = + value.version + } + + val record1 = Record(version = 1, 
"Alice", "Union Square") + val r1 = LWWRegister(record1) + + val record2 = Record(version = 2, "Alice", "Madison Square") + val r2 = LWWRegister(record2) + + val r3 = r1.merge(r2) + println(r3.value) + //#lwwregister-custom-clock + + r3.value.address should be("Madison Square") + } + + "test TwoPhaseSetSerializer" in { + val s1 = TwoPhaseSet().add("a").add("b").add("c").remove("b") + s1.elements should be(Set("a", "c")) + val serializer = SerializationExtension(system).findSerializerFor(s1) + val blob = serializer.toBinary(s1) + val s2 = serializer.fromBinary(blob, None) + s1 should be(s1) + } + +} diff --git a/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala b/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala new file mode 100644 index 0000000000..4afa40b96b --- /dev/null +++ b/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala @@ -0,0 +1,29 @@ +/** + * Copyright (C) 2015 Typesafe Inc. + */ +package docs.ddata + +import akka.cluster.ddata.ReplicatedData +import akka.cluster.ddata.GSet + +//#twophaseset +case class TwoPhaseSet( + adds: GSet[String] = GSet.empty, + removals: GSet[String] = GSet.empty) + extends ReplicatedData { + type T = TwoPhaseSet + + def add(element: String): TwoPhaseSet = + copy(adds = adds.add(element)) + + def remove(element: String): TwoPhaseSet = + copy(removals = removals.add(element)) + + def elements: Set[String] = adds.elements -- removals.elements + + override def merge(that: TwoPhaseSet): TwoPhaseSet = + copy( + adds = GSet(this.adds.elements ++ that.adds.elements), + removals = GSet(this.removals.elements ++ that.removals.elements)) +} +//#twophaseset diff --git a/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala new file mode 100644 index 0000000000..98e67f62ad --- /dev/null +++ b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala @@ -0,0 +1,75 @@ +/** + * Copyright (C) 2015 Typesafe Inc. 
+ */ +package docs.ddata.protobuf + +//#serializer +import java.util.ArrayList +import java.util.Collections +import scala.collection.JavaConverters._ +import akka.actor.ExtendedActorSystem +import akka.cluster.ddata.GSet +import akka.cluster.ddata.protobuf.SerializationSupport +import akka.serialization.Serializer +import docs.ddata.TwoPhaseSet +import docs.ddata.protobuf.msg.TwoPhaseSetMessages + +class TwoPhaseSetSerializer(val system: ExtendedActorSystem) + extends Serializer with SerializationSupport { + + override def includeManifest: Boolean = false + + override def identifier = 99999 + + override def toBinary(obj: AnyRef): Array[Byte] = obj match { + case m: TwoPhaseSet ⇒ twoPhaseSetToProto(m).toByteArray + case _ ⇒ throw new IllegalArgumentException( + s"Can't serialize object of type ${obj.getClass}") + } + + override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { + twoPhaseSetFromBinary(bytes) + } + + def twoPhaseSetToProto(twoPhaseSet: TwoPhaseSet): TwoPhaseSetMessages.TwoPhaseSet = { + val b = TwoPhaseSetMessages.TwoPhaseSet.newBuilder() + // using java collections and sorting for performance (avoid conversions) + val adds = new ArrayList[String] + twoPhaseSet.adds.elements.foreach(adds.add) + if (!adds.isEmpty) { + Collections.sort(adds) + b.addAllAdds(adds) + } + val removals = new ArrayList[String] + twoPhaseSet.removals.elements.foreach(removals.add) + if (!removals.isEmpty) { + Collections.sort(removals) + b.addAllRemovals(removals) + } + b.build() + } + + def twoPhaseSetFromBinary(bytes: Array[Byte]): TwoPhaseSet = { + val msg = TwoPhaseSetMessages.TwoPhaseSet.parseFrom(bytes) + TwoPhaseSet( + adds = GSet(msg.getAddsList.iterator.asScala.toSet), + removals = GSet(msg.getRemovalsList.iterator.asScala.toSet)) + } +} +//#serializer + +class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) + extends TwoPhaseSetSerializer(system) { + //#compression + override def toBinary(obj: AnyRef): Array[Byte] = obj match 
{ + case m: TwoPhaseSet ⇒ compress(twoPhaseSetToProto(m)) + case _ ⇒ throw new IllegalArgumentException( + s"Can't serialize object of type ${obj.getClass}") + } + + override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { + twoPhaseSetFromBinary(decompress(bytes)) + } + //#compression +} + diff --git a/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala new file mode 100644 index 0000000000..a625ca5261 --- /dev/null +++ b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala @@ -0,0 +1,59 @@ +/** + * Copyright (C) 2015 Typesafe Inc. + */ +package docs.ddata.protobuf + +//#serializer +import scala.collection.JavaConverters._ +import akka.actor.ExtendedActorSystem +import akka.cluster.ddata.GSet +import akka.cluster.ddata.protobuf.ReplicatedDataSerializer +import akka.cluster.ddata.protobuf.SerializationSupport +import akka.serialization.Serializer +import docs.ddata.TwoPhaseSet +import docs.ddata.protobuf.msg.TwoPhaseSetMessages + +class TwoPhaseSetSerializer2(val system: ExtendedActorSystem) + extends Serializer with SerializationSupport { + + override def includeManifest: Boolean = false + + override def identifier = 99999 + + val replicatedDataSerializer = new ReplicatedDataSerializer(system) + + override def toBinary(obj: AnyRef): Array[Byte] = obj match { + case m: TwoPhaseSet ⇒ twoPhaseSetToProto(m).toByteArray + case _ ⇒ throw new IllegalArgumentException( + s"Can't serialize object of type ${obj.getClass}") + } + + override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { + twoPhaseSetFromBinary(bytes) + } + + def twoPhaseSetToProto(twoPhaseSet: TwoPhaseSet): TwoPhaseSetMessages.TwoPhaseSet2 = { + val b = TwoPhaseSetMessages.TwoPhaseSet2.newBuilder() + if (!twoPhaseSet.adds.isEmpty) + b.setAdds(otherMessageToProto(twoPhaseSet.adds).toByteString()) + if (!twoPhaseSet.removals.isEmpty) + 
b.setRemovals(otherMessageToProto(twoPhaseSet.removals).toByteString()) + b.build() + } + + def twoPhaseSetFromBinary(bytes: Array[Byte]): TwoPhaseSet = { + val msg = TwoPhaseSetMessages.TwoPhaseSet2.parseFrom(bytes) + val adds = + if (msg.hasAdds) + otherMessageFromBinary(msg.getAdds.toByteArray).asInstanceOf[GSet[String]] + else + GSet.empty[String] + val removals = + if (msg.hasRemovals) + otherMessageFromBinary(msg.getRemovals.toByteArray).asInstanceOf[GSet[String]] + else + GSet.empty[String] + TwoPhaseSet(adds, removals) + } +} +//#serializer diff --git a/akka-docs/rst/scala/distributed-data.rst b/akka-docs/rst/scala/distributed-data.rst new file mode 100644 index 0000000000..7862204acd --- /dev/null +++ b/akka-docs/rst/scala/distributed-data.rst @@ -0,0 +1,504 @@ + +.. _distributed_data_scala: + +################## + Distributed Data +################## + +*Akka Distributed Data* is useful when you need to share data between nodes in an +Akka Cluster. The data is accessed with an actor providing a key-value store like API. +The keys are unique identifiers with type information of the data values. The values +are *Conflict Free Replicated Data Types* (CRDTs). + +All data entries are spread to all nodes, or nodes with a certain role, in the cluster +via direct replication and gossip based dissemination. You have fine grained control +of the consistency level for reads and writes. + +The nature CRDTs makes it possible to perform updates from any node without coordination. +Concurrent updates from different nodes will automatically be resolved by the monotonic +merge function, which all data types must provide. The state changes always converge. +Several useful data types for counters, sets, maps and registers are provided and +you can also implement your own custom data types. + +It is eventually consistent and geared toward providing high read and write availability +(partition tolerance), with low latency. 
Note that in an eventually consistent system a read may return an +out-of-date value. + +Using the Replicator +==================== + +The ``akka.cluster.ddata.Replicator`` actor provides the API for interacting with the data. +The ``Replicator`` actor must be started on each node in the cluster, or group of nodes tagged +with a specific role. It communicates with other ``Replicator`` instances with the same path +(without address) that are running on other nodes . For convenience it can be used with the +``akka.cluster.ddata.DistributedData`` extension. + +Below is an example of an actor that schedules tick messages to itself and for each tick +adds or removes elements from a ``ORSet`` (observed-remove set). It also subscribes to +changes of this. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#data-bot + +.. _replicator_update_scala: + +Update +------ + +To modify and replicate a data value you send a ``Replicator.Update`` message to the the local +``Replicator``. + +The current data value for the ``key`` of the ``Update`` is passed as parameter to the ``modify`` +function of the ``Update``. The function is supposed to return the new value of the data, which +will then be replicated according to the given consistency level. + +The ``modify`` function is called by the ``Replicator`` actor and must therefore be a pure +function that only uses the data parameter and stable fields from enclosing scope. It must +for example not access ``sender()`` reference of an enclosing actor. + +``Update`` is intended to only be sent from an actor running in same local ``ActorSystem`` as + * the `Replicator`, because the `modify` function is typically not serializable. 
+ +You supply a write consistency level which has the following meaning: + +* ``WriteLocal`` the value will immediately only be written to the local replica, + and later disseminated with gossip +* ``WriteTo(n)`` the value will immediately be written to at least ``n`` replicas, + including the local replica +* ``WriteMajority`` the value will immediately be written to a majority of replicas, i.e. + at least **N/2 + 1** replicas, where N is the number of nodes in the cluster + (or cluster role group) +* ``WriteAll`` the value will immediately be written to all nodes in the cluster + (or all nodes in the cluster role group) + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#update + +As reply of the ``Update`` a ``Replicator.UpdateSuccess`` is sent to the sender of the +``Update`` if the value was successfully replicated according to the supplied consistency +level within the supplied timeout. Otherwise a ``Replicator.UpdateFailure`` subclass is +sent back. Note that a ``Replicator.UpdateTimeout`` reply does not mean that the update completely failed +or was rolled back. It may still have been replicated to some nodes, and will eventually +be replicated to all nodes with the gossip protocol. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#update-response1 + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#update-response2 + +You will always see your own writes. For example if you send two ``Update`` messages +changing the value of the same ``key``, the ``modify`` function of the second message will +see the change that was performed by the first ``Update`` message. + +In the ``Update`` message you can pass an optional request context, which the ``Replicator`` +does not care about, but is included in the reply messages. This is a convenient +way to pass contextual information (e.g. original sender) without having to use ``ask`` +or maintain local correlation data structures. + +.. 
includecode:: code/docs/ddata/DistributedDataDocSpec.scala#update-request-context + +.. _replicator_get_scala: + +Get +--- + +To retrieve the current value of a data you send ``Replicator.Get`` message to the +``Replicator``. You supply a consistency level which has the following meaning: + +* ``ReadLocal`` the value will only be read from the local replica +* ``ReadFrom(n)`` the value will be read and merged from ``n`` replicas, + including the local replica +* ``ReadMajority`` the value will be read and merged from a majority of replicas, i.e. + at least **N/2 + 1** replicas, where N is the number of nodes in the cluster + (or cluster role group) +* ``ReadAll`` the value will be read and merged from all nodes in the cluster + (or all nodes in the cluster role group) + + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#get + +As reply of the ``Get`` a ``Replicator.GetSuccess`` is sent to the sender of the +``Get`` if the value was successfully retrieved according to the supplied consistency +level within the supplied timeout. Otherwise a ``Replicator.GetFailure`` is sent. +If the key does not exist the reply will be ``Replicator.NotFound``. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#get-response1 + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#get-response2 + +You will always read your own writes. For example if you send a ``Update`` message +followed by a ``Get`` of the same ``key`` the ``Get`` will retrieve the change that was +performed by the preceding ``Update`` message. However, the order of the reply messages are +not defined, i.e. in the previous example you may receive the ``GetSuccess`` before +the ``UpdateSuccess``. + +In the ``Get`` message you can pass an optional request context in the same way as for the +``Update`` message, described above. For example the original sender can be passed and replied +to after receiving and transforming ``GetSuccess``. + +.. 
includecode:: code/docs/ddata/DistributedDataDocSpec.scala#get-request-context + +Consistency +----------- + +The consistency level that is supplied in the :ref:`replicator_update_scala` and :ref:`replicator_get_scala` +specifies per request how many replicas that must respond successfully to a write and read request. + +For low latency reads you use ``ReadLocal`` with the risk of retrieving stale data, i.e. updates +from other nodes might not be visible yet. + +When using ``WriteLocal`` the update is only written to the local replica and then disseminated +in the background with the gossip protocol, which can take few seconds to spread to all nodes. + +``WriteAll`` and ``ReadAll`` is the strongest consistency level, but also the slowest and with +lowest availability. For example, it is enough that one node is unavailable for a ``Get`` request +and you will not receive the value. + +If consistency is important, you can ensure that a read always reflects the most recent +write by using the following formula:: + + (nodes_written + nodes_read) > N + +where N is the total number of nodes in the cluster, or the number of nodes with the role that is +used for the ``Replicator``. + +For example, in a 7 node cluster this these consistency properties are achieved by writing to 4 nodes +and reading from 4 nodes, or writing to 5 nodes and reading from 3 nodes. + +By combining ``WriteMajority`` and ``ReadMajority`` levels a read always reflects the most recent write. +The ``Replicator`` writes and reads to a majority of replicas, i.e. **N / 2 + 1**. For example, +in a 5 node cluster it writes to 3 nodes and reads from 3 nodes. In a 6 node cluster it writes +to 4 nodes and reads from 4 nodes. + +Here is an example of using ``WriteMajority`` and ``ReadMajority``: + +.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#read-write-majority + +.. 
includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#get-cart + +.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#add-item + +In some rare cases, when performing an ``Update`` it is needed to first try to fetch latest data from +other nodes. That can be done by first sending a ``Get`` with ``ReadMajority`` and then continue with +the ``Update`` when the ``GetSuccess``, ``GetFailure`` or ``NotFound`` reply is received. This might be +needed when you need to base a decision on latest information or when removing entries from ``ORSet`` +or ``ORMap``. If an entry is added to an ``ORSet`` or ``ORMap`` from one node and removed from another +node the entry will only be removed if the added entry is visible on the node where the removal is +performed (hence the name observed-removed set). + +The following example illustrates how to do that: + +.. includecode:: ../../../akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala#remove-item + +.. warning:: + + *Caveat:* Even if you use ``WriteMajority`` and ``ReadMajority`` there is small risk that you may + read stale data if the cluster membership has changed between the ``Update`` and the ``Get``. + For example, in cluster of 5 nodes when you ``Update`` and that change is written to 3 nodes: + n1, n2, n3. Then 2 more nodes are added and a ``Get`` request is reading from 4 nodes, which + happens to be n4, n5, n6, n7, i.e. the value on n1, n2, n3 is not seen in the response of the + ``Get`` request. + +Subscribe +--------- + +You may also register interest in change notifications by sending ``Replicator.Subscribe`` +message to the ``Replicator``. It will send ``Replicator.Changed`` messages to the registered +subscriber when the data for the subscribed key is updated. 
Subscribers will be notified +periodically with the configured ``notify-subscribers-interval``, and it is also possible to +send an explicit ``Replicator.FlushChanges`` message to the ``Replicator`` to notify the subscribers +immediately. + +The subscriber is automatically removed if the subscriber is terminated. A subscriber can +also be deregistered with the ``Replicator.Unsubscribe`` message. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#subscribe + +Delete +------ + +A data entry can be deleted by sending a ``Replicator.Delete`` message to the local +local ``Replicator``. As reply of the ``Delete`` a ``Replicator.DeleteSuccess`` is sent to +the sender of the ``Delete`` if the value was successfully deleted according to the supplied +consistency level within the supplied timeout. Otherwise a ``Replicator.ReplicationDeleteFailure`` +is sent. Note that ``ReplicationDeleteFailure`` does not mean that the delete completely failed or +was rolled back. It may still have been replicated to some nodes, and may eventually be replicated +to all nodes. + +A deleted key cannot be reused again, but it is still recommended to delete unused +data entries because that reduces the replication overhead when new nodes join the cluster. +Subsequent ``Delete``, ``Update`` and ``Get`` requests will be replied with ``Replicator.DataDeleted``. +Subscribers will receive ``Replicator.DataDeleted``. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#delete + +Data Types +========== + +The data types must be convergent (stateful) CRDTs and implement the ``ReplicatedData`` trait, +i.e. they provide a monotonic merge function and the state changes always converge. 
+ +You can use your own custom ``ReplicatedData`` types, and several types are provided +by this package, such as: + +* Counters: ``GCounter``, ``PNCounter`` +* Sets: ``GSet``, ``ORSet`` +* Maps: ``ORMap``, ``LWWMap``, ``PNCounterMap`` +* Registers: ``LWWRegister``, ``Flag`` + +Counters +-------- + +``GCounter`` is a "grow only counter". It only supports increments, no decrements. + +It works in a similar way as a vector clock. It keeps track of one counter per node and the total +value is the sum of these counters. The ``merge`` is implemented by taking the maximum count for +each node. + +If you need both increments and decrements you can use the ``PNCounter`` (positive/negative counter). + +It is tracking the increments (P) separate from the decrements (N). Both P and N are represented +as two internal ``GCounter``. Merge is handled by merging the internal P and N counters. +The value of the counter is the value of the P counter minus the value of the N counter. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#pncounter + +Several related counters can be managed in a map with the ``PNCounterMap`` data type. +When the counters are placed in a ``PNCounterMap`` as opposed to placing them as separate top level +values they are guaranteed to be replicated together as one unit, which is sometimes necessary for +related data. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#pncountermap + +Sets +---- + +If you only need to add elements to a set and not remove elements the ``GSet`` (grow-only set) is +the data type to use. The elements can be any type of values that can be serialized. +Merge is simply the union of the two sets. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#gset + +If you need add and remove operations you should use the ``ORSet`` (observed-remove set). +Elements can be added and removed any number of times. If an element is concurrently added and +removed, the add will win. 
You cannot remove an element that you have not seen. + +The ``ORSet`` has a version vector that is incremented when an element is added to the set. +The version for the node that added the element is also tracked for each element in a so +called "birth dot". The version vector and the dots are used by the ``merge`` function to +track causality of the operations and resolve concurrent updates. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#orset + +Maps +---- + +``ORMap`` (observed-remove map) is a map with ``String`` keys and the values are ``ReplicatedData`` +types themselves. It supports add, remove and delete any number of times for a map entry. + +If an entry is concurrently added and removed, the add will win. You cannot remove an entry that +you have not seen. This is the same semantics as for the ``ORSet``. + +If an entry is concurrently updated to different values the values will be merged, hence the +requirement that the values must be ``ReplicatedData`` types. + +It is rather inconvenient to use the ``ORMap`` directly since it does not expose specific types +of the values. The ``ORMap`` is intended as a low level tool for building more specific maps, +such as the following specialized maps. + +``PNCounterMap`` (positive negative counter map) is a map of named counters. It is a specialized +``ORMap`` with ``PNCounter`` values. + +``LWWMap`` (last writer wins map) is a specialized ``ORMap`` with ``LWWRegister`` (last writer wins register) +values. + +Note that ``LWWRegister`` and therefore ``LWWMap`` relies on synchronized clocks and should only be used +when the choice of value is not important for concurrent updates occurring within the clock skew. + +Instead of using timestamps based on ``System.currentTimeMillis()`` time it is possible to +use a timestamp value based on something else, for example an increasing version number +from a database record that is used for optimistic concurrency control. 
+ +When a data entry is changed the full state of that entry is replicated to other nodes, i.e. +when you update an map the whole map is replicated. Therefore, instead of using one ``ORMap`` +with 1000 elements it is more efficient to split that up in 10 top level ``ORMap`` entries +with 100 elements each. Top level entries are replicated individually, which has the +trade-off that different entries may not be replicated at the same time and you may see +inconsistencies between related entries. Separate top level entries cannot be updated atomically +together. + +Flags and Registers +------------------- + +``Flag`` is a data type for a boolean value that is initialized to ``false`` and can be switched +to ``true``. Thereafter it cannot be changed. ``true`` wins over ``false`` in merge. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#flag + +``LWWRegister`` (last writer wins register) can hold any (serializable) value. + +Merge of a ``LWWRegister`` takes the the register with highest timestamp. Note that this +relies on synchronized clocks. `LWWRegister` should only be used when the choice of +value is not important for concurrent updates occurring within the clock skew. + +Merge takes the register updated by the node with lowest address (``UniqueAddress`` is ordered) +if the timestamps are exactly the same. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#lwwregister + +Instead of using timestamps based on ``System.currentTimeMillis()`` time it is possible to +use a timestamp value based on something else, for example an increasing version number +from a database record that is used for optimistic concurrency control. + +.. includecode:: code/docs/ddata/DistributedDataDocSpec.scala#lwwregister-custom-clock + +For first-write-wins semantics you can use the ``LWWRegister#reverseClock`` instead of the +``LWWRegister#defaultClock``. + +Custom Data Type +---------------- + +You can rather easily implement your own data types. 
The only requirement is that it implements +the ``merge`` function of the ``ReplicatedData`` trait. + +A nice property of stateful CRDTs is that they typically compose nicely, i.e. you can combine several +smaller data types to build richer data structures. For example, the ``PNCounter`` is composed of +two internal ``GCounter`` instances to keep track of increments and decrements separately. + +Here is s simple implementation of a custom ``TwoPhaseSet`` that is using two internal ``GSet`` types +to keep track of addition and removals. A ``TwoPhaseSet`` is a set where an element may be added and +removed, but never added again thereafter. + +.. includecode:: code/docs/ddata/TwoPhaseSet.scala#twophaseset + +Data types should be immutable, i.e. "modifying" methods should return a new instance. + +Serialization +^^^^^^^^^^^^^ + +The data types must be serializable with an :ref:`Akka Serializer `. +It is highly recommended that you implement efficient serialization with Protobuf or similar +for your custom data types. The built in data types are marked with ``ReplicatedDataSerialization`` +and serialized with ``akka.cluster.ddata.protobuf.ReplicatedDataSerializer``. + +Serialization of the data types are used in remote messages and also for creating message +digests (SHA-1) to detect changes. Therefore it is important that the serialization is efficient +and produce the same bytes for the same content. For example sets and maps should be sorted +deterministically in the serialization. + +This is a protobuf representation of the above ``TwoPhaseSet``: + +.. includecode:: ../../src/main/protobuf/TwoPhaseSetMessages.proto#twophaseset + +The serializer for the ``TwoPhaseSet``: + +.. includecode:: code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala#serializer + +Note that the elements of the sets are sorted so the SHA-1 digests are the same +for the same elements. + +You register the serializer in configuration: + +.. 
includecode:: code/docs/ddata/DistributedDataDocSpec.scala#serializer-config + +Using compression can sometimes be a good idea to reduce the data size. Gzip compression is +provided by the ``akka.cluster.ddata.protobuf.SerializationSupport`` trait: + +.. includecode:: code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala#compression + +The two embedded ``GSet`` can be serialized as illustrated above, but in general when composing +new data types from the existing built in types it is better to make use of the existing +serializer for those types. This can be done by declaring those as bytes fields in protobuf: + +.. includecode:: ../../src/main/protobuf/TwoPhaseSetMessages.proto#twophaseset2 + +and use the methods ``otherMessageToProto`` and ``otherMessageFromBinary`` that are provided +by the ``SerializationSupport`` trait to serialize and deserialize the ``GSet`` instances. This +works with any type that has a registered Akka serializer. This is how such an serializer would +look like for the ``TwoPhaseSet``: + +.. includecode:: code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala#serializer + + +CRDT Garbage +------------ + +One thing that can be problematic with CRDTs is that some data types accumulate history (garbage). +For example a ``GCounter`` keeps track of one counter per node. If a ``GCounter`` has been updated +from one node it will associate the identifier of that node forever. That can become a problem +for long running systems with many cluster nodes being added and removed. To solve this problem +the ``Replicator`` performs pruning of data associated with nodes that have been removed from the +cluster. Data types that need pruning have to implement the ``RemovedNodePruning`` trait. 
+ +Samples +======= + +**FIXME convert these samples to activator template** + +* `Replicated Cache <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala>`_ +* `Replicated Metrics <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala>`_ +* `Replicated Service Registry <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedServiceRegistrySpec.scala>`_ +* `VotingService <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/VotingContestSpec.scala>`_ +* `ShoppingCart <@github@/akka-distributed-data/src/multi-jvm/scala/sample/distributeddata/ReplicatedShoppingCartSpec.scala>`_ + +Limitations +=========== + +There are some limitations that you should be aware of. + +CRDTs cannot be used for all types of problems, and eventual consistency does not fit +all domains. Sometimes you need strong consistency. + +It is not intended for *Big Data*. The number of top level entries should not exceed 100000. +When a new node is added to the cluster all these entries are transferred (gossiped) to the +new node. The entries are split up in chunks and all existing nodes collaborate in the gossip, +but it will take a while (tens of seconds) to transfer all entries and this means that you +cannot have too many top level entries. The current recommended limit is 100000. We will +be able to improve this if needed, but the design is still not intended for billions of entries. + +All data is held in memory, which is another reason why it is not intended for *Big Data*. + +When a data entry is changed the full state of that entry is replicated to other nodes. For example, +if you add one element to a Set with 100 existing elements, all 101 elements are transferred to +other nodes. This means that you cannot have too large data entries, because then the remote message +size will be too large. 
We might be able to make this more efficient by implementing +`Efficient State-based CRDTs by Delta-Mutation `_. + +The data is only kept in memory. It is redundant since it is replicated to other nodes +in the cluster, but if you stop all nodes the data is lost, unless you have saved it +elsewhere. Making the data durable is a possible future feature, but even if we implement that +it is not intended to be a full featured database. + +Learn More about CRDTs +====================== + +* `The Final Causal Frontier `_ + talk by Sean Cribbs +* `Eventually Consistent Data Structures `_ + talk by Sean Cribbs +* `Strong Eventual Consistency and Conflict-free Replicated Data Types `_ + talk by Mark Shapiro +* `A comprehensive study of Convergent and Commutative Replicated Data Types `_ + paper by Mark Shapiro et. al. + +Dependencies +------------ + +To use Distributed Data you must add the following dependency in your project. + +sbt:: + + "com.typesafe.akka" %% "akka-distributed-data" % "@version@" @crossString@ + +maven:: + + + com.typesafe.akka + akka-distributed-data_@binVersion@ + @version@ + + +Configuration +============= + +The ``DistributedData`` extension can be configured with the following properties: + +.. 
includecode:: ../../../akka-distributed-data/src/main/resources/reference.conf#distributed-data + \ No newline at end of file diff --git a/akka-docs/rst/scala/index-network.rst b/akka-docs/rst/scala/index-network.rst index 88810d9b67..aa8b6a5c38 100644 --- a/akka-docs/rst/scala/index-network.rst +++ b/akka-docs/rst/scala/index-network.rst @@ -11,6 +11,7 @@ Networking cluster-client cluster-sharding cluster-metrics + distributed-data remoting serialization io diff --git a/akka-docs/src/main/java/docs/ddata/protobuf/msg/TwoPhaseSetMessages.java b/akka-docs/src/main/java/docs/ddata/protobuf/msg/TwoPhaseSetMessages.java new file mode 100644 index 0000000000..fe59d9ca47 --- /dev/null +++ b/akka-docs/src/main/java/docs/ddata/protobuf/msg/TwoPhaseSetMessages.java @@ -0,0 +1,1236 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: TwoPhaseSetMessages.proto + +package docs.ddata.protobuf.msg; + +public final class TwoPhaseSetMessages { + private TwoPhaseSetMessages() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface TwoPhaseSetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string adds = 1; + /** + * repeated string adds = 1; + */ + java.util.List + getAddsList(); + /** + * repeated string adds = 1; + */ + int getAddsCount(); + /** + * repeated string adds = 1; + */ + java.lang.String getAdds(int index); + /** + * repeated string adds = 1; + */ + com.google.protobuf.ByteString + getAddsBytes(int index); + + // repeated string removals = 2; + /** + * repeated string removals = 2; + */ + java.util.List + getRemovalsList(); + /** + * repeated string removals = 2; + */ + int getRemovalsCount(); + /** + * repeated string removals = 2; + */ + java.lang.String getRemovals(int index); + /** + * repeated string removals = 2; + */ + com.google.protobuf.ByteString + getRemovalsBytes(int index); + } + /** + * Protobuf type {@code docs.ddata.TwoPhaseSet} + */ + 
public static final class TwoPhaseSet extends + com.google.protobuf.GeneratedMessage + implements TwoPhaseSetOrBuilder { + // Use TwoPhaseSet.newBuilder() to construct. + private TwoPhaseSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TwoPhaseSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TwoPhaseSet defaultInstance; + public static TwoPhaseSet getDefaultInstance() { + return defaultInstance; + } + + public TwoPhaseSet getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TwoPhaseSet( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + adds_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + adds_.add(input.readBytes()); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + removals_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + removals_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + adds_ = new com.google.protobuf.UnmodifiableLazyStringList(adds_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + removals_ = new com.google.protobuf.UnmodifiableLazyStringList(removals_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.class, docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TwoPhaseSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TwoPhaseSet(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string adds = 1; + public static final int ADDS_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList adds_; + /** + * repeated string adds = 1; + */ + public java.util.List + getAddsList() { + return adds_; + } + /** + * repeated string adds = 1; + */ + public int getAddsCount() { + return adds_.size(); + } + /** + * repeated string adds = 1; + */ + public 
java.lang.String getAdds(int index) { + return adds_.get(index); + } + /** + * repeated string adds = 1; + */ + public com.google.protobuf.ByteString + getAddsBytes(int index) { + return adds_.getByteString(index); + } + + // repeated string removals = 2; + public static final int REMOVALS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList removals_; + /** + * repeated string removals = 2; + */ + public java.util.List + getRemovalsList() { + return removals_; + } + /** + * repeated string removals = 2; + */ + public int getRemovalsCount() { + return removals_.size(); + } + /** + * repeated string removals = 2; + */ + public java.lang.String getRemovals(int index) { + return removals_.get(index); + } + /** + * repeated string removals = 2; + */ + public com.google.protobuf.ByteString + getRemovalsBytes(int index) { + return removals_.getByteString(index); + } + + private void initFields() { + adds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + removals_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < adds_.size(); i++) { + output.writeBytes(1, adds_.getByteString(i)); + } + for (int i = 0; i < removals_.size(); i++) { + output.writeBytes(2, removals_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < adds_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(adds_.getByteString(i)); + } + size += dataSize; + size += 
1 * getAddsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < removals_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(removals_.getByteString(i)); + } + size += dataSize; + size += 1 * getRemovalsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code docs.ddata.TwoPhaseSet} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.class, docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.Builder.class); + } + + // Construct using docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + adds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + removals_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet_descriptor; + } + + public docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet getDefaultInstanceForType() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.getDefaultInstance(); + } + + public docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet build() { + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet buildPartial() { + 
docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet result = new docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + adds_ = new com.google.protobuf.UnmodifiableLazyStringList( + adds_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.adds_ = adds_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + removals_ = new com.google.protobuf.UnmodifiableLazyStringList( + removals_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.removals_ = removals_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet) { + return mergeFrom((docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet other) { + if (other == docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet.getDefaultInstance()) return this; + if (!other.adds_.isEmpty()) { + if (adds_.isEmpty()) { + adds_ = other.adds_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureAddsIsMutable(); + adds_.addAll(other.adds_); + } + onChanged(); + } + if (!other.removals_.isEmpty()) { + if (removals_.isEmpty()) { + removals_ = other.removals_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRemovalsIsMutable(); + removals_.addAll(other.removals_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string adds = 1; + private com.google.protobuf.LazyStringList adds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureAddsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + adds_ = new com.google.protobuf.LazyStringArrayList(adds_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string adds = 1; + */ + public java.util.List + getAddsList() { + return java.util.Collections.unmodifiableList(adds_); + } + /** + * repeated string adds = 1; + */ + public int getAddsCount() { + return adds_.size(); + } + /** + * repeated string adds = 1; + */ + public java.lang.String getAdds(int index) { + return adds_.get(index); + } + /** + * repeated string adds = 1; + */ + public com.google.protobuf.ByteString + getAddsBytes(int index) { + return adds_.getByteString(index); + } + /** + * repeated string adds = 1; + */ + public Builder setAdds( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAddsIsMutable(); + adds_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string adds = 1; + */ + public Builder addAdds( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAddsIsMutable(); + adds_.add(value); + onChanged(); + return this; + } + /** + * repeated string adds = 1; + */ + public Builder addAllAdds( + java.lang.Iterable values) { + ensureAddsIsMutable(); + super.addAll(values, adds_); + onChanged(); + return this; + } + /** + * repeated string adds = 1; + */ + public Builder clearAdds() { + adds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & 
~0x00000001); + onChanged(); + return this; + } + /** + * repeated string adds = 1; + */ + public Builder addAddsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAddsIsMutable(); + adds_.add(value); + onChanged(); + return this; + } + + // repeated string removals = 2; + private com.google.protobuf.LazyStringList removals_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureRemovalsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + removals_ = new com.google.protobuf.LazyStringArrayList(removals_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string removals = 2; + */ + public java.util.List + getRemovalsList() { + return java.util.Collections.unmodifiableList(removals_); + } + /** + * repeated string removals = 2; + */ + public int getRemovalsCount() { + return removals_.size(); + } + /** + * repeated string removals = 2; + */ + public java.lang.String getRemovals(int index) { + return removals_.get(index); + } + /** + * repeated string removals = 2; + */ + public com.google.protobuf.ByteString + getRemovalsBytes(int index) { + return removals_.getByteString(index); + } + /** + * repeated string removals = 2; + */ + public Builder setRemovals( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRemovalsIsMutable(); + removals_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string removals = 2; + */ + public Builder addRemovals( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRemovalsIsMutable(); + removals_.add(value); + onChanged(); + return this; + } + /** + * repeated string removals = 2; + */ + public Builder addAllRemovals( + java.lang.Iterable values) { + ensureRemovalsIsMutable(); + super.addAll(values, removals_); + onChanged(); + return this; + } + /** + * repeated string removals = 2; + */ + public Builder 
clearRemovals() { + removals_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string removals = 2; + */ + public Builder addRemovalsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRemovalsIsMutable(); + removals_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:docs.ddata.TwoPhaseSet) + } + + static { + defaultInstance = new TwoPhaseSet(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:docs.ddata.TwoPhaseSet) + } + + public interface TwoPhaseSet2OrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bytes adds = 1; + /** + * optional bytes adds = 1; + */ + boolean hasAdds(); + /** + * optional bytes adds = 1; + */ + com.google.protobuf.ByteString getAdds(); + + // optional bytes removals = 2; + /** + * optional bytes removals = 2; + */ + boolean hasRemovals(); + /** + * optional bytes removals = 2; + */ + com.google.protobuf.ByteString getRemovals(); + } + /** + * Protobuf type {@code docs.ddata.TwoPhaseSet2} + * + *
+   *#twophaseset2
+   * 
+ */ + public static final class TwoPhaseSet2 extends + com.google.protobuf.GeneratedMessage + implements TwoPhaseSet2OrBuilder { + // Use TwoPhaseSet2.newBuilder() to construct. + private TwoPhaseSet2(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TwoPhaseSet2(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TwoPhaseSet2 defaultInstance; + public static TwoPhaseSet2 getDefaultInstance() { + return defaultInstance; + } + + public TwoPhaseSet2 getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TwoPhaseSet2( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + adds_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + removals_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } 
+ } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.class, docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TwoPhaseSet2 parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TwoPhaseSet2(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional bytes adds = 1; + public static final int ADDS_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString adds_; + /** + * optional bytes adds = 1; + */ + public boolean hasAdds() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes adds = 1; + */ + public com.google.protobuf.ByteString getAdds() { + return adds_; + } + + // optional bytes removals = 2; + public static final int REMOVALS_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString removals_; + /** + * optional bytes removals = 2; + */ + public boolean hasRemovals() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bytes removals = 2; + */ + public com.google.protobuf.ByteString getRemovals() { + return removals_; + } + + private void initFields() { + adds_ = com.google.protobuf.ByteString.EMPTY; + removals_ = com.google.protobuf.ByteString.EMPTY; + } + 
private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, adds_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, removals_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, adds_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, removals_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 prototype) { + return newBuilder().mergeFrom(prototype); + } + 
public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code docs.ddata.TwoPhaseSet2} + * + *
+     *#twophaseset2
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2OrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.class, docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.Builder.class); + } + + // Construct using docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + adds_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + removals_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.internal_static_docs_ddata_TwoPhaseSet2_descriptor; + } + + public docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 getDefaultInstanceForType() { + return docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.getDefaultInstance(); + } + + public 
docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 build() { + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 buildPartial() { + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 result = new docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.adds_ = adds_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.removals_ = removals_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2) { + return mergeFrom((docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 other) { + if (other == docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2.getDefaultInstance()) return this; + if (other.hasAdds()) { + setAdds(other.getAdds()); + } + if (other.hasRemovals()) { + setRemovals(other.getRemovals()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2 parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) 
{ + parsedMessage = (docs.ddata.protobuf.msg.TwoPhaseSetMessages.TwoPhaseSet2) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional bytes adds = 1; + private com.google.protobuf.ByteString adds_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes adds = 1; + */ + public boolean hasAdds() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes adds = 1; + */ + public com.google.protobuf.ByteString getAdds() { + return adds_; + } + /** + * optional bytes adds = 1; + */ + public Builder setAdds(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + adds_ = value; + onChanged(); + return this; + } + /** + * optional bytes adds = 1; + */ + public Builder clearAdds() { + bitField0_ = (bitField0_ & ~0x00000001); + adds_ = getDefaultInstance().getAdds(); + onChanged(); + return this; + } + + // optional bytes removals = 2; + private com.google.protobuf.ByteString removals_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes removals = 2; + */ + public boolean hasRemovals() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bytes removals = 2; + */ + public com.google.protobuf.ByteString getRemovals() { + return removals_; + } + /** + * optional bytes removals = 2; + */ + public Builder setRemovals(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + removals_ = value; + onChanged(); + return this; + } + /** + * optional bytes removals = 2; + */ + public Builder clearRemovals() { + bitField0_ = (bitField0_ & ~0x00000002); + removals_ = getDefaultInstance().getRemovals(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:docs.ddata.TwoPhaseSet2) + } + + static { + defaultInstance = new 
TwoPhaseSet2(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:docs.ddata.TwoPhaseSet2) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_docs_ddata_TwoPhaseSet_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_docs_ddata_TwoPhaseSet_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_docs_ddata_TwoPhaseSet2_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_docs_ddata_TwoPhaseSet2_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\031TwoPhaseSetMessages.proto\022\ndocs.ddata\"" + + "-\n\013TwoPhaseSet\022\014\n\004adds\030\001 \003(\t\022\020\n\010removals" + + "\030\002 \003(\t\".\n\014TwoPhaseSet2\022\014\n\004adds\030\001 \001(\014\022\020\n\010" + + "removals\030\002 \001(\014B\033\n\027docs.ddata.protobuf.ms" + + "gH\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_docs_ddata_TwoPhaseSet_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_docs_ddata_TwoPhaseSet_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_docs_ddata_TwoPhaseSet_descriptor, + new java.lang.String[] { "Adds", "Removals", }); + internal_static_docs_ddata_TwoPhaseSet2_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_docs_ddata_TwoPhaseSet2_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_docs_ddata_TwoPhaseSet2_descriptor, + new java.lang.String[] { "Adds", "Removals", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/akka-docs/src/main/protobuf/TwoPhaseSetMessages.proto b/akka-docs/src/main/protobuf/TwoPhaseSetMessages.proto new file mode 100644 index 0000000000..32d37cfedd --- /dev/null +++ b/akka-docs/src/main/protobuf/TwoPhaseSetMessages.proto @@ -0,0 +1,24 @@ +/** + * Copyright (C) 2014-2015 Typesafe Inc. + */ +package docs.ddata; + +//#twophaseset +option java_package = "docs.ddata.protobuf.msg"; +option optimize_for = SPEED; + +message TwoPhaseSet { + repeated string adds = 1; + repeated string removals = 2; +} +//#twophaseset + +//#twophaseset2 +message TwoPhaseSet2 { + optional bytes adds = 1; + optional bytes removals = 2; +} +//#twophaseset2 + + + diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 588bd2b5e6..e05c999f27 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -51,7 +51,8 @@ object AkkaBuild extends Build { archivesPathFinder.get.map(file => (file -> ("akka/" + file.getName))) } ), - aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, clusterMetrics, clusterTools, clusterSharding, + aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, + cluster, clusterMetrics, clusterTools, clusterSharding, distributedData, slf4j, agent, persistence, persistenceTck, kernel, osgi, docs, contrib, samples, multiNodeTestkit, benchJmh, typed) ) @@ -60,7 +61,8 @@ object AkkaBuild extends Build { base = file("akka-scala-nightly"), // remove dependencies that we have to build ourselves (Scala STM) // samples don't work with dbuild right now - aggregate = Seq(actor, testkit, 
actorTests, remote, remoteTests, camel, cluster, clusterMetrics, clusterTools, clusterSharding, + aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, + cluster, clusterMetrics, clusterTools, clusterSharding, distributedData, slf4j, persistence, persistenceTck, kernel, osgi, contrib, multiNodeTestkit, benchJmh, typed) ).disablePlugins(ValidatePullRequest) @@ -135,6 +137,12 @@ object AkkaBuild extends Build { dependencies = Seq(cluster % "compile->compile;test->test;multi-jvm->multi-jvm", persistence % "compile;test->provided", clusterTools) ) configs (MultiJvm) + + lazy val distributedData = Project( + id = "akka-distributed-data", + base = file("akka-distributed-data"), + dependencies = Seq(cluster % "compile->compile;test->test;multi-jvm->multi-jvm") + ) configs (MultiJvm) lazy val slf4j = Project( id = "akka-slf4j", @@ -184,7 +192,7 @@ object AkkaBuild extends Build { dependencies = Seq(actor, testkit % "test->test", remote % "compile;test->test", cluster, clusterMetrics, slf4j, agent, camel, osgi, persistence % "compile;provided->provided;test->test", persistenceTck, - typed % "compile;test->test") + typed % "compile;test->test", distributedData) ) lazy val contrib = Project( diff --git a/project/Dependencies.scala b/project/Dependencies.scala index e8ed64573c..3dfbbb19af 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -94,6 +94,8 @@ object Dependencies { val clusterSharding = l ++= Seq(Test.junit, Test.scalatest.value, Test.commonsIo) val clusterMetrics = l ++= Seq(Provided.sigarLoader, Test.slf4jJul, Test.slf4jLog4j, Test.logback, Test.mockito) + + val distributedData = l ++= Seq(Test.junit, Test.scalatest.value) val slf4j = l ++= Seq(slf4jApi, Test.logback) diff --git a/project/OSGi.scala b/project/OSGi.scala index 25658aa34a..ff1ad6803a 100644 --- a/project/OSGi.scala +++ b/project/OSGi.scala @@ -34,6 +34,8 @@ object OSGi { val clusterSharding = exports(Seq("akka.cluster.sharding.*"), imports = 
Seq(protobufImport())) val clusterMetrics = exports(Seq("akka.cluster.metrics.*"), imports = Seq(protobufImport(),kamonImport(),sigarImport())) + + val distributedData = exports(Seq("akka.cluster.ddata.*"), imports = Seq(protobufImport())) val contrib = exports(Seq("akka.contrib.*"))